//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
// Functions to parse block-level elements.
//
package blackfriday
|
|
|
|
|
2014-10-28 04:54:23 +08:00
|
|
|
import (
|
|
|
|
"bytes"
|
2014-11-30 12:41:11 +08:00
|
|
|
|
2015-01-11 10:23:29 +08:00
|
|
|
"github.com/shurcooL/sanitized_anchor_name"
|
2014-10-28 04:54:23 +08:00
|
|
|
)
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-06-30 10:15:58 +08:00
|
|
|
// Parse block-level data.
// Note: this function and many that it calls assume that
// the input buffer ends with a newline.
func (p *parser) block(out *bytes.Buffer, data []byte) {
	if len(data) == 0 || data[len(data)-1] != '\n' {
		panic("block input is missing terminating newline")
	}

	// this is called recursively: enforce a maximum depth
	if p.nesting >= p.maxNesting {
		return
	}
	p.nesting++

	// parse out one block-level construct at a time;
	// each recognizer consumes a prefix of data and we loop on the rest.
	// NOTE: the order of these checks establishes parsing precedence.
	for len(data) > 0 {
		// prefixed header:
		//
		// # Header 1
		// ## Header 2
		// ...
		// ###### Header 6
		if p.isPrefixHeader(data) {
			data = data[p.prefixHeader(out, data):]
			continue
		}

		// block of preformatted HTML:
		//
		// <div>
		//     ...
		// </div>
		if data[0] == '<' {
			if i := p.html(out, data, true); i > 0 {
				data = data[i:]
				continue
			}
		}

		// title block
		//
		// % stuff
		// % more stuff
		// % even more stuff
		if p.flags&EXTENSION_TITLEBLOCK != 0 {
			if data[0] == '%' {
				if i := p.titleBlock(out, data, true); i > 0 {
					data = data[i:]
					continue
				}
			}
		}

		// blank lines.  note: returns the # of bytes to skip
		if i := p.isEmpty(data); i > 0 {
			data = data[i:]
			continue
		}

		// indented code block:
		//
		//     func max(a, b int) int {
		//         if a > b {
		//             return a
		//         }
		//         return b
		//     }
		if p.codePrefix(data) > 0 {
			data = data[p.code(out, data):]
			continue
		}

		// fenced code block:
		//
		// ``` go
		// func fact(n int) int {
		//     if n <= 1 {
		//         return n
		//     }
		//     return n * fact(n-1)
		// }
		// ```
		if p.flags&EXTENSION_FENCED_CODE != 0 {
			if i := p.fencedCode(out, data, true); i > 0 {
				data = data[i:]
				continue
			}
		}

		// horizontal rule:
		//
		// ------
		// or
		// ******
		// or
		// ______
		if p.isHRule(data) {
			p.r.HRule(out)
			var i int
			// consume the rest of the rule line (up to, not including, the newline;
			// the leading-newline leftover is absorbed as a blank line next pass)
			for i = 0; data[i] != '\n'; i++ {
			}
			data = data[i:]
			continue
		}

		// block quote:
		//
		// > A big quote I found somewhere
		// > on the web
		if p.quotePrefix(data) > 0 {
			data = data[p.quote(out, data):]
			continue
		}

		// table:
		//
		// Name  | Age | Phone
		// ------|-----|---------
		// Bob   | 31  | 555-1234
		// Alice | 27  | 555-4321
		if p.flags&EXTENSION_TABLES != 0 {
			if i := p.table(out, data); i > 0 {
				data = data[i:]
				continue
			}
		}

		// an itemized/unordered list:
		//
		// * Item 1
		// * Item 2
		//
		// also works with + or -
		if p.uliPrefix(data) > 0 {
			data = data[p.list(out, data, 0):]
			continue
		}

		// a numbered/ordered list:
		//
		// 1. Item 1
		// 2. Item 2
		if p.oliPrefix(data) > 0 {
			data = data[p.list(out, data, LIST_TYPE_ORDERED):]
			continue
		}

		// definition lists:
		//
		// Term 1
		// :   Definition a
		// :   Definition b
		//
		// Term 2
		// :   Definition c
		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
			if p.dliPrefix(data) > 0 {
				data = data[p.list(out, data, LIST_TYPE_DEFINITION):]
				continue
			}
		}

		// anything else must look like a normal paragraph
		// note: this finds underlined headers, too
		data = data[p.paragraph(out, data):]
	}

	p.nesting--
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) isPrefixHeader(data []byte) bool {
|
2011-05-30 07:00:31 +08:00
|
|
|
if data[0] != '#' {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
if p.flags&EXTENSION_SPACE_HEADERS != 0 {
|
2011-05-30 07:00:31 +08:00
|
|
|
level := 0
|
2011-06-30 10:15:58 +08:00
|
|
|
for level < 6 && data[level] == '#' {
|
2011-05-30 07:00:31 +08:00
|
|
|
level++
|
|
|
|
}
|
2011-07-02 00:03:03 +08:00
|
|
|
if data[level] != ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// prefixHeader parses an ATX-style header at the start of data, renders it,
// and returns the number of bytes consumed (through the end of the line).
// It handles optional explicit header IDs of the form {#id} when
// EXTENSION_HEADER_IDS is set, and generates an anchor from the header text
// when EXTENSION_AUTO_HEADER_IDS is set and no explicit ID was found.
func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
	// count the '#' run to determine the header level (max 6)
	level := 0
	for level < 6 && data[level] == '#' {
		level++
	}
	i := skipChar(data, level, ' ')
	end := skipUntilChar(data, i, '\n')
	skip := end
	id := ""
	if p.flags&EXTENSION_HEADER_IDS != 0 {
		j, k := 0, 0
		// find start/end of header id
		for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
		}
		for k = j + 1; k < end && data[k] != '}'; k++ {
		}
		// extract header id iff found
		if j < end && k < end {
			id = string(data[j+2 : k])
			// text ends where the {#id} block begins; still skip the whole line
			end = j
			skip = k + 1
			// drop spaces between the text and the {#id} block
			for end > 0 && data[end-1] == ' ' {
				end--
			}
		}
	}
	// strip optional closing '#' marks, unless backslash-escaped
	for end > 0 && data[end-1] == '#' {
		if isBackslashEscaped(data, end-1) {
			break
		}
		end--
	}
	// strip trailing spaces
	for end > 0 && data[end-1] == ' ' {
		end--
	}
	if end > i {
		if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
			// derive an anchor from the header text; uniqueness across the
			// document is the renderer's responsibility, not handled here
			id = sanitized_anchor_name.Create(string(data[i:end]))
		}
		work := func() bool {
			p.inline(out, data[i:end])
			return true
		}
		p.r.Header(out, work, level, id)
	}
	return skip
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) isUnderlinedHeader(data []byte) int {
|
2011-05-30 07:00:31 +08:00
|
|
|
// test of level 1 header
|
2011-06-30 10:15:58 +08:00
|
|
|
if data[0] == '=' {
|
2015-04-08 02:59:42 +08:00
|
|
|
i := skipChar(data, 1, '=')
|
|
|
|
i = skipChar(data, i, ' ')
|
2011-06-30 10:15:58 +08:00
|
|
|
if data[i] == '\n' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return 1
|
|
|
|
} else {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// test of level 2 header
|
2011-06-30 10:15:58 +08:00
|
|
|
if data[0] == '-' {
|
2015-04-08 02:59:42 +08:00
|
|
|
i := skipChar(data, 1, '-')
|
|
|
|
i = skipChar(data, i, ' ')
|
2011-06-30 10:15:58 +08:00
|
|
|
if data[i] == '\n' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return 2
|
|
|
|
} else {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2014-08-02 10:54:21 +08:00
|
|
|
func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int {
|
|
|
|
if data[0] != '%' {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
splitData := bytes.Split(data, []byte("\n"))
|
|
|
|
var i int
|
|
|
|
for idx, b := range splitData {
|
|
|
|
if !bytes.HasPrefix(b, []byte("%")) {
|
|
|
|
i = idx // - 1
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
data = bytes.Join(splitData[0:i], []byte("\n"))
|
|
|
|
p.r.TitleBlock(out, data)
|
|
|
|
|
|
|
|
return len(data)
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// html parses a block of raw HTML starting at data[0] (which must be '<').
// It returns the number of bytes consumed, or 0 when data does not begin a
// complete HTML block. Comments, <hr> tags, and CDATA sections get special
// handling; known block tags are matched against a closing tag followed by
// a blank line.
func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
	var i, j int

	// identify the opening tag
	if data[0] != '<' {
		return 0
	}
	curtag, tagfound := p.htmlFindTag(data[1:])

	// handle special cases
	if !tagfound {
		// check for an HTML comment
		if size := p.htmlComment(out, data, doRender); size > 0 {
			return size
		}

		// check for an <hr> tag
		if size := p.htmlHr(out, data, doRender); size > 0 {
			return size
		}

		// check for HTML CDATA
		if size := p.htmlCDATA(out, data, doRender); size > 0 {
			return size
		}

		// no special case recognized
		return 0
	}

	// look for an unindented matching closing tag
	// followed by a blank line
	found := false
	/*
		closetag := []byte("\n</" + curtag + ">")
		j = len(curtag) + 1
		for !found {
			// scan for a closing tag at the beginning of a line
			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
				j += skip + len(closetag)
			} else {
				break
			}

			// see if it is the only thing on the line
			if skip := p.isEmpty(data[j:]); skip > 0 {
				// see if it is followed by a blank line/eof
				j += skip
				if j >= len(data) {
					found = true
					i = j
				} else {
					if skip := p.isEmpty(data[j:]); skip > 0 {
						j += skip
						found = true
						i = j
					}
				}
			}
		}
	*/

	// if not found, try a second pass looking for indented match
	// but not if tag is "ins" or "del" (following original Markdown.pl)
	if !found && curtag != "ins" && curtag != "del" {
		i = 1
		for i < len(data) {
			i++
			// advance to the next "</" sequence
			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
				i++
			}

			// not enough room left for "</tag>" — give up
			if i+2+len(curtag) >= len(data) {
				break
			}

			// check whether this is the matching close tag on an
			// otherwise-blank line followed by a blank line
			j = p.htmlFindEnd(curtag, data[i-1:])

			if j > 0 {
				i += j - 1
				found = true
				break
			}
		}
	}

	if !found {
		return 0
	}

	// the end of the block has been found
	if doRender {
		// trim newlines
		end := i
		for end > 0 && data[end-1] == '\n' {
			end--
		}
		p.r.BlockHtml(out, data[:end])
	}

	return i
}
|
|
|
|
|
2015-12-25 19:04:56 +08:00
|
|
|
func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
|
2015-12-15 02:23:49 +08:00
|
|
|
// html block needs to end with a blank line
|
|
|
|
if i := p.isEmpty(data[start:]); i > 0 {
|
|
|
|
size := start + i
|
2011-06-30 10:15:58 +08:00
|
|
|
if doRender {
|
|
|
|
// trim trailing newlines
|
|
|
|
end := size
|
|
|
|
for end > 0 && data[end-1] == '\n' {
|
|
|
|
end--
|
|
|
|
}
|
2011-07-08 01:56:45 +08:00
|
|
|
p.r.BlockHtml(out, data[:end])
|
2011-06-30 10:15:58 +08:00
|
|
|
}
|
|
|
|
return size
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-12-15 02:23:49 +08:00
|
|
|
// HTML comment, lax form
|
|
|
|
func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
|
2015-12-25 19:04:56 +08:00
|
|
|
i := p.inlineHTMLComment(out, data)
|
|
|
|
return p.renderHTMLBlock(out, data, i, doRender)
|
2015-12-15 02:23:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// htmlCDATA handles an HTML CDATA section at the start of data. The opening
// marker is matched case-insensitively (hence the lowercase constant and the
// ToLower comparison). Returns the number of bytes consumed through the
// trailing blank line, or 0 if data is not a complete CDATA block.
func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
	const cdataTag = "<![cdata["
	const cdataTagLen = len(cdataTag)
	if len(data) < cdataTagLen+1 {
		return 0
	}
	if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
		return 0
	}
	i := cdataTagLen
	// scan for an end-of-comment marker, across lines if necessary
	// (i starts at 9, so the i-2/i-1 look-behind is always in range)
	for i < len(data) && !(data[i-2] == ']' && data[i-1] == ']' && data[i] == '>') {
		i++
	}
	i++
	// no end-of-comment marker
	if i >= len(data) {
		return 0
	}
	return p.renderHTMLBlock(out, data, i, doRender)
}
|
|
|
|
|
2011-06-30 10:15:58 +08:00
|
|
|
// HR, which is the only self-closing block tag considered
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
|
2011-06-30 10:15:58 +08:00
|
|
|
if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
|
|
|
|
return 0
|
|
|
|
}
|
2011-07-02 00:03:03 +08:00
|
|
|
if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
|
2011-06-30 10:15:58 +08:00
|
|
|
// not an <hr> tag after all; at least not a valid one
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
i := 3
|
|
|
|
for data[i] != '>' && data[i] != '\n' {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
if data[i] == '>' {
|
2015-12-25 19:04:56 +08:00
|
|
|
return p.renderHTMLBlock(out, data, i+1, doRender)
|
2011-06-30 10:15:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) htmlFindTag(data []byte) (string, bool) {
|
2011-06-30 10:15:58 +08:00
|
|
|
i := 0
|
|
|
|
for isalnum(data[i]) {
|
|
|
|
i++
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
key := string(data[:i])
|
2015-11-10 13:18:55 +08:00
|
|
|
if _, ok := blockTags[key]; ok {
|
2011-05-30 07:00:31 +08:00
|
|
|
return key, true
|
|
|
|
}
|
|
|
|
return "", false
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// htmlFindEnd checks whether data begins with the closing tag for tag,
// with nothing else on that line, followed by a blank line (the blank line
// requirement is waived under EXTENSION_LAX_HTML_BLOCKS or at end of
// buffer). Returns the number of bytes consumed, or 0 on no match.
func (p *parser) htmlFindEnd(tag string, data []byte) int {
	// assume data[0] == '<' && data[1] == '/' already tested

	// check if tag is a match
	closetag := []byte("</" + tag + ">")
	if !bytes.HasPrefix(data, closetag) {
		return 0
	}
	i := len(closetag)

	// check that the rest of the line is blank
	skip := 0
	if skip = p.isEmpty(data[i:]); skip == 0 {
		return 0
	}
	i += skip
	skip = 0

	// end of buffer counts as the required blank line
	if i >= len(data) {
		return i
	}

	// lax mode does not require a blank line after the close tag
	if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
		return i
	}
	if skip = p.isEmpty(data[i:]); skip == 0 {
		// following line must be blank
		return 0
	}

	return i + skip
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) isEmpty(data []byte) int {
|
2011-06-30 10:15:58 +08:00
|
|
|
// it is okay to call isEmpty on an empty buffer
|
|
|
|
if len(data) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-05-30 07:00:31 +08:00
|
|
|
var i int
|
2013-06-27 00:09:27 +08:00
|
|
|
for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
|
|
|
if data[i] != ' ' && data[i] != '\t' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return i + 1
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) isHRule(data []byte) bool {
|
2011-05-30 07:00:31 +08:00
|
|
|
i := 0
|
2011-06-01 06:07:15 +08:00
|
|
|
|
|
|
|
// skip up to three spaces
|
|
|
|
for i < 3 && data[i] == ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
// look at the hrule char
|
2011-06-30 10:15:58 +08:00
|
|
|
if data[i] != '*' && data[i] != '-' && data[i] != '_' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
c := data[i]
|
|
|
|
|
|
|
|
// the whole line must be the char or whitespace
|
|
|
|
n := 0
|
2011-06-30 10:15:58 +08:00
|
|
|
for data[i] != '\n' {
|
2011-05-30 07:00:31 +08:00
|
|
|
switch {
|
|
|
|
case data[i] == c:
|
|
|
|
n++
|
2011-07-02 00:03:03 +08:00
|
|
|
case data[i] != ' ':
|
2011-05-30 07:00:31 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
|
|
|
return n >= 3
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// isFencedCode checks whether data begins with a code fence: at most three
// leading spaces, then at least three '~' or '`' characters. It returns the
// number of bytes through the end of the fence line (0 on no match) and the
// fence marker string. When oldmarker is non-empty this is a closing-fence
// test and the marker must match it exactly. When syntax is non-nil, the
// optional language tag after the fence (bare word or {...} block) is
// parsed and stored through it.
func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (skip int, marker string) {
	i, size := 0, 0
	skip = 0

	// skip up to three spaces
	for i < len(data) && i < 3 && data[i] == ' ' {
		i++
	}
	if i >= len(data) {
		return
	}

	// check for the marker characters: ~ or `
	if data[i] != '~' && data[i] != '`' {
		return
	}

	c := data[i]

	// the whole line must be the same char or whitespace
	for i < len(data) && data[i] == c {
		size++
		i++
	}

	if i >= len(data) {
		return
	}

	// the marker char must occur at least 3 times
	if size < 3 {
		return
	}
	marker = string(data[i-size : i])

	// if this is the end marker, it must match the beginning marker
	if oldmarker != "" && marker != oldmarker {
		return
	}

	if syntax != nil {
		syn := 0
		i = skipChar(data, i, ' ')

		if i >= len(data) {
			return
		}

		syntaxStart := i

		if data[i] == '{' {
			i++
			syntaxStart++

			// measure the {...} contents up to '}' or end of line
			for i < len(data) && data[i] != '}' && data[i] != '\n' {
				syn++
				i++
			}

			// an unterminated {...} block invalidates the fence
			if i >= len(data) || data[i] != '}' {
				return
			}

			// strip all whitespace at the beginning and the end
			// of the {} block
			for syn > 0 && isspace(data[syntaxStart]) {
				syntaxStart++
				syn--
			}

			for syn > 0 && isspace(data[syntaxStart+syn-1]) {
				syn--
			}

			i++
		} else {
			// bare language word: runs to the first whitespace
			for i < len(data) && !isspace(data[i]) {
				syn++
				i++
			}
		}

		language := string(data[syntaxStart : syntaxStart+syn])
		*syntax = &language
	}

	// nothing but spaces may follow before the end of the line
	i = skipChar(data, i, ' ')
	if i >= len(data) || data[i] != '\n' {
		return
	}

	skip = i + 1
	return
}
|
|
|
|
|
2014-03-31 03:57:58 +08:00
|
|
|
// fencedCode parses a fenced code block at the start of data and renders it
// (when doRender is set) with the detected language tag. It returns the
// number of bytes consumed through the closing fence, or 0 when data does
// not contain a complete fenced block.
func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
	var lang *string
	beg, marker := p.isFencedCode(data, &lang, "")
	if beg == 0 || beg >= len(data) {
		return 0
	}

	var work bytes.Buffer

	for {
		// safe to assume beg < len(data)

		// check for the end of the code block
		fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker)
		if fenceEnd != 0 {
			beg += fenceEnd
			break
		}

		// copy the current line
		end := skipUntilChar(data, beg, '\n') + 1

		// did we reach the end of the buffer without a closing marker?
		if end >= len(data) {
			return 0
		}

		// verbatim copy to the working buffer
		if doRender {
			work.Write(data[beg:end])
		}
		beg = end
	}

	syntax := ""
	if lang != nil {
		syntax = *lang
	}

	if doRender {
		p.r.BlockCode(out, work.Bytes(), syntax)
	}

	return beg
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// table parses an extension-style table (header row, alignment underline,
// body rows) at the start of data, renders it, and returns the number of
// bytes consumed; 0 means data does not begin a valid table.
func (p *parser) table(out *bytes.Buffer, data []byte) int {
	var header bytes.Buffer
	i, columns := p.tableHeader(&header, data)
	if i == 0 {
		return 0
	}

	var body bytes.Buffer

	for i < len(data) {
		pipes, rowStart := 0, i
		for ; data[i] != '\n'; i++ {
			if data[i] == '|' {
				pipes++
			}
		}

		// a line with no pipes ends the table; rewind so the caller
		// re-parses it as whatever block it really is
		if pipes == 0 {
			i = rowStart
			break
		}

		// include the newline in data sent to tableRow
		i++
		p.tableRow(&body, data[rowStart:i], columns, false)
	}

	p.r.Table(out, header.Bytes(), body.Bytes(), columns)

	return i
}
|
|
|
|
|
2015-04-08 02:12:29 +08:00
|
|
|
// isBackslashEscaped reports whether the byte at position i is escaped,
// i.e. preceded by an odd number of consecutive backslashes.
func isBackslashEscaped(data []byte, i int) bool {
	count := 0
	for j := i - 1; j >= 0 && data[j] == '\\'; j-- {
		count++
	}
	return count%2 == 1
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// tableHeader validates and renders the first two lines of a table: the
// header row and the alignment underline. On success it returns the number
// of bytes consumed and a per-column slice of TABLE_ALIGNMENT_* flags; on
// failure it returns (0, nil), letting the caller fall through to other
// block types. Unescaped '|' characters delimit columns; pipes at the very
// beginning or end of a line are cosmetic and do not add columns.
func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
	i := 0
	colCount := 1
	for i = 0; data[i] != '\n'; i++ {
		if data[i] == '|' && !isBackslashEscaped(data, i) {
			colCount++
		}
	}

	// doesn't look like a table header
	if colCount == 1 {
		return
	}

	// include the newline in the data sent to tableRow
	header := data[:i+1]

	// column count ignores pipes at beginning or end of line
	if data[0] == '|' {
		colCount--
	}
	if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
		colCount--
	}

	columns = make([]int, colCount)

	// move on to the header underline
	i++
	if i >= len(data) {
		return
	}

	// skip an optional leading pipe and spaces on the underline
	if data[i] == '|' && !isBackslashEscaped(data, i) {
		i++
	}
	i = skipChar(data, i, ' ')

	// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
	// and trailing | optional on last column
	col := 0
	for data[i] != '\n' {
		dashes := 0

		// leading ':' requests left alignment
		if data[i] == ':' {
			i++
			columns[col] |= TABLE_ALIGNMENT_LEFT
			dashes++
		}
		for data[i] == '-' {
			i++
			dashes++
		}
		// trailing ':' requests right alignment (both set = centered)
		if data[i] == ':' {
			i++
			columns[col] |= TABLE_ALIGNMENT_RIGHT
			dashes++
		}
		for data[i] == ' ' {
			i++
		}

		// end of column test is messy
		switch {
		case dashes < 3:
			// not a valid column
			return

		case data[i] == '|' && !isBackslashEscaped(data, i):
			// marker found, now skip past trailing whitespace
			col++
			i++
			for data[i] == ' ' {
				i++
			}

			// trailing junk found after last column
			if col >= colCount && data[i] != '\n' {
				return
			}

		case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
			// something else found where marker was required
			return

		case data[i] == '\n':
			// marker is optional for the last column
			col++

		default:
			// trailing junk found after last column
			return
		}
	}
	// underline must define exactly as many columns as the header row
	if col != colCount {
		return
	}

	p.tableRow(out, header, columns, true)
	size = i + 1
	return
}
|
|
|
|
|
2013-10-16 18:36:33 +08:00
|
|
|
func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
|
2011-05-30 07:00:31 +08:00
|
|
|
i, col := 0, 0
|
2011-06-29 06:02:12 +08:00
|
|
|
var rowWork bytes.Buffer
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-09-14 06:23:24 +08:00
|
|
|
if data[i] == '|' && !isBackslashEscaped(data, i) {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
2011-07-26 00:23:31 +08:00
|
|
|
for col = 0; col < len(columns) && i < len(data); col++ {
|
2011-07-02 04:13:26 +08:00
|
|
|
for data[i] == ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
2011-06-29 06:02:12 +08:00
|
|
|
cellStart := i
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-09-14 06:23:24 +08:00
|
|
|
for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
|
|
|
|
2011-07-02 04:13:26 +08:00
|
|
|
cellEnd := i
|
2011-07-26 00:23:31 +08:00
|
|
|
|
|
|
|
// skip the end-of-cell marker, possibly taking us past end of buffer
|
2011-07-02 04:13:26 +08:00
|
|
|
i++
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-07-02 04:13:26 +08:00
|
|
|
for cellEnd > cellStart && data[cellEnd-1] == ' ' {
|
2011-06-29 06:02:12 +08:00
|
|
|
cellEnd--
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
2011-06-29 06:02:12 +08:00
|
|
|
var cellWork bytes.Buffer
|
2011-07-08 01:56:45 +08:00
|
|
|
p.inline(&cellWork, data[cellStart:cellEnd])
|
2013-10-16 18:36:33 +08:00
|
|
|
|
|
|
|
if header {
|
|
|
|
p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
|
|
|
|
} else {
|
|
|
|
p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
|
|
|
|
}
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
2011-07-02 04:13:26 +08:00
|
|
|
// pad it out with empty columns to get the right number
|
|
|
|
for ; col < len(columns); col++ {
|
2013-10-16 18:36:33 +08:00
|
|
|
if header {
|
|
|
|
p.r.TableHeaderCell(&rowWork, nil, columns[col])
|
|
|
|
} else {
|
|
|
|
p.r.TableCell(&rowWork, nil, columns[col])
|
|
|
|
}
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
2011-07-02 04:13:26 +08:00
|
|
|
// silently ignore rows with too many cells
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
p.r.TableRow(out, rowWork.Bytes())
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// returns blockquote prefix length
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) quotePrefix(data []byte) int {
|
2011-05-30 07:00:31 +08:00
|
|
|
i := 0
|
2011-06-30 10:15:58 +08:00
|
|
|
for i < 3 && data[i] == ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
2011-06-30 10:15:58 +08:00
|
|
|
if data[i] == '>' {
|
2011-07-02 00:03:03 +08:00
|
|
|
if data[i+1] == ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return i + 2
|
|
|
|
}
|
|
|
|
return i + 1
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-10-30 02:02:11 +08:00
|
|
|
// blockquote ends with at least one blank line
|
|
|
|
// followed by something without a blockquote prefix
|
2015-11-01 15:32:30 +08:00
|
|
|
func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
|
2015-10-30 02:02:11 +08:00
|
|
|
if p.isEmpty(data[beg:]) <= 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if end >= len(data) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
|
|
|
|
}
|
|
|
|
|
2011-05-30 07:00:31 +08:00
|
|
|
// quote parses a blockquote fragment at the start of data and returns
// the number of bytes consumed. The quoted lines are collected with
// their "> " prefixes stripped, re-parsed as block-level content, and
// handed to the renderer as a single blockquote.
func (p *parser) quote(out *bytes.Buffer, data []byte) int {
	// raw accumulates the blockquote body with quote prefixes removed
	var raw bytes.Buffer
	beg, end := 0, 0
	for beg < len(data) {
		end = beg
		// Step over whole lines, collecting them. While doing that, check for
		// fenced code and if one's found, incorporate it altogether,
		// regardless of any contents inside it
		for data[end] != '\n' {
			if p.flags&EXTENSION_FENCED_CODE != 0 {
				if i := p.fencedCode(out, data[end:], false); i > 0 {
					// -1 to compensate for the extra end++ after the loop:
					end += i - 1
					break
				}
			}
			end++
		}
		end++ // step past the newline

		if pre := p.quotePrefix(data[beg:]); pre > 0 {
			// skip the prefix
			beg += pre
		} else if p.terminateBlockquote(data, beg, end) {
			// blank line followed by unquoted, non-blank content ends the quote
			break
		}

		// this line is part of the blockquote
		raw.Write(data[beg:end])
		beg = end
	}

	// recursively parse the collected content as block-level markdown
	var cooked bytes.Buffer
	p.block(&cooked, raw.Bytes())
	p.r.BlockQuote(out, cooked.Bytes())
	return end
}
|
|
|
|
|
|
|
|
// returns prefix length for block code
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) codePrefix(data []byte) int {
|
2011-07-02 04:13:26 +08:00
|
|
|
if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return 4
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) code(out *bytes.Buffer, data []byte) int {
|
2011-06-01 01:11:04 +08:00
|
|
|
var work bytes.Buffer
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-07-05 08:56:29 +08:00
|
|
|
i := 0
|
|
|
|
for i < len(data) {
|
|
|
|
beg := i
|
|
|
|
for data[i] != '\n' {
|
|
|
|
i++
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
2011-07-05 08:56:29 +08:00
|
|
|
i++
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
blankline := p.isEmpty(data[beg:i]) > 0
|
|
|
|
if pre := p.codePrefix(data[beg:i]); pre > 0 {
|
2011-05-30 07:00:31 +08:00
|
|
|
beg += pre
|
2011-07-06 04:22:21 +08:00
|
|
|
} else if !blankline {
|
|
|
|
// non-empty, non-prefixed line breaks the pre
|
|
|
|
i = beg
|
|
|
|
break
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
2011-07-05 08:56:29 +08:00
|
|
|
// verbatim copy to the working buffeu
|
|
|
|
if blankline {
|
|
|
|
work.WriteByte('\n')
|
|
|
|
} else {
|
|
|
|
work.Write(data[beg:i])
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// trim all the \n off the end of work
|
|
|
|
workbytes := work.Bytes()
|
2011-07-05 08:56:29 +08:00
|
|
|
eol := len(workbytes)
|
|
|
|
for eol > 0 && workbytes[eol-1] == '\n' {
|
|
|
|
eol--
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
2011-07-05 08:56:29 +08:00
|
|
|
if eol != len(workbytes) {
|
|
|
|
work.Truncate(eol)
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
work.WriteByte('\n')
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
p.r.BlockCode(out, work.Bytes(), "")
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-07-05 08:56:29 +08:00
|
|
|
return i
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// returns unordered list item prefix
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) uliPrefix(data []byte) int {
|
2011-05-30 07:00:31 +08:00
|
|
|
i := 0
|
2011-06-01 06:07:15 +08:00
|
|
|
|
|
|
|
// start with up to 3 spaces
|
2011-07-05 08:56:29 +08:00
|
|
|
for i < 3 && data[i] == ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
2011-06-01 06:07:15 +08:00
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// need a *, +, or - followed by a space
|
2011-07-05 08:56:29 +08:00
|
|
|
if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
|
2011-07-02 00:03:03 +08:00
|
|
|
data[i+1] != ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return i + 2
|
|
|
|
}
|
|
|
|
|
|
|
|
// returns ordered list item prefix
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) oliPrefix(data []byte) int {
|
2011-05-30 07:00:31 +08:00
|
|
|
i := 0
|
2011-06-01 06:07:15 +08:00
|
|
|
|
|
|
|
// start with up to 3 spaces
|
2011-07-05 08:56:29 +08:00
|
|
|
for i < 3 && data[i] == ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
2011-06-01 06:07:15 +08:00
|
|
|
|
|
|
|
// count the digits
|
|
|
|
start := i
|
2011-07-05 08:56:29 +08:00
|
|
|
for data[i] >= '0' && data[i] <= '9' {
|
2011-05-30 07:00:31 +08:00
|
|
|
i++
|
|
|
|
}
|
2011-06-01 06:07:15 +08:00
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// we need >= 1 digits followed by a dot and a space
|
2011-07-05 08:56:29 +08:00
|
|
|
if start == i || data[i] != '.' || data[i+1] != ' ' {
|
2011-05-30 07:00:31 +08:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return i + 2
|
|
|
|
}
|
|
|
|
|
2015-05-29 19:30:49 +08:00
|
|
|
// returns definition list item prefix
|
|
|
|
func (p *parser) dliPrefix(data []byte) int {
|
|
|
|
i := 0
|
|
|
|
|
|
|
|
// need a : followed by a spaces
|
|
|
|
if data[i] != ':' || data[i+1] != ' ' {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
for data[i] == ' ' {
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
return i + 2
|
|
|
|
}
|
|
|
|
|
2011-05-30 07:00:31 +08:00
|
|
|
// parse ordered or unordered list block
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
|
2011-06-26 05:02:46 +08:00
|
|
|
i := 0
|
2011-07-02 01:19:42 +08:00
|
|
|
flags |= LIST_ITEM_BEGINNING_OF_LIST
|
2011-06-26 05:02:46 +08:00
|
|
|
work := func() bool {
|
|
|
|
for i < len(data) {
|
2011-07-08 01:56:45 +08:00
|
|
|
skip := p.listItem(out, data[i:], &flags)
|
2011-07-05 08:56:29 +08:00
|
|
|
i += skip
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-07-05 08:56:29 +08:00
|
|
|
if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
|
2011-06-26 05:02:46 +08:00
|
|
|
break
|
|
|
|
}
|
2011-07-02 01:19:42 +08:00
|
|
|
flags &= ^LIST_ITEM_BEGINNING_OF_LIST
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
2011-06-26 05:02:46 +08:00
|
|
|
return true
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
p.r.List(out, work, flags)
|
2011-05-30 07:00:31 +08:00
|
|
|
return i
|
|
|
|
}
|
|
|
|
|
2011-07-05 08:56:29 +08:00
|
|
|
// Parse a single list item, rendering it via the renderer, and return
// the number of bytes consumed.
// Assumes initial prefix is already removed if this is a sublist.
// flags is read and updated in place (end-of-list, contains-block, and
// definition-term state are communicated back to the caller).
func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
	// keep track of the indentation of the first line
	itemIndent := 0
	for itemIndent < 3 && data[itemIndent] == ' ' {
		itemIndent++
	}

	// try each list-item prefix form in turn
	i := p.uliPrefix(data)
	if i == 0 {
		i = p.oliPrefix(data)
	}
	if i == 0 {
		i = p.dliPrefix(data)
		// reset definition term flag
		if i > 0 {
			*flags &= ^LIST_TYPE_TERM
		}
	}
	if i == 0 {
		// if in definition list, set term flag and continue;
		// otherwise this is not a list item at all
		if *flags&LIST_TYPE_DEFINITION != 0 {
			*flags |= LIST_TYPE_TERM
		} else {
			return 0
		}
	}

	// skip leading whitespace on first line
	for data[i] == ' ' {
		i++
	}

	// find the end of the line
	// (the i > 0 guard covers the definition-term case above, where no
	// prefix was consumed and i may still be 0)
	line := i
	for i > 0 && data[i-1] != '\n' {
		i++
	}

	// get working buffer
	var raw bytes.Buffer

	// put the first line into the working buffer
	raw.Write(data[line:i])
	line = i

	// process the following lines
	containsBlankLine := false
	sublist := 0 // offset in raw where a nested sublist starts, 0 if none

gatherlines:
	for line < len(data) {
		i++

		// find the end of this line
		for data[i-1] != '\n' {
			i++
		}

		// if it is an empty line, guess that it is part of this item
		// and move on to the next line
		if p.isEmpty(data[line:i]) > 0 {
			containsBlankLine = true
			raw.Write(data[line:i])
			line = i
			continue
		}

		// calculate the indentation (capped at four spaces)
		indent := 0
		for indent < 4 && line+indent < i && data[line+indent] == ' ' {
			indent++
		}

		chunk := data[line+indent : i]

		// evaluate how this line fits in
		switch {
		// is this a nested list item?
		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
			p.oliPrefix(chunk) > 0 ||
			p.dliPrefix(chunk) > 0:

			if containsBlankLine {
				// end the list if the type changed after a blank line
				if (*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
					(*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0) {

					*flags |= LIST_ITEM_END_OF_LIST
					break gatherlines
				}
				*flags |= LIST_ITEM_CONTAINS_BLOCK
			}

			// to be a nested list, it must be indented more
			// if not, it is the next item in the same list
			if indent <= itemIndent {
				break gatherlines
			}

			// is this the first item in the nested list?
			if sublist == 0 {
				sublist = raw.Len()
			}

		// is this a nested prefix header?
		case p.isPrefixHeader(chunk):
			// if the header is not indented, it is not nested in the list
			// and thus ends the list
			if containsBlankLine && indent < 4 {
				*flags |= LIST_ITEM_END_OF_LIST
				break gatherlines
			}
			*flags |= LIST_ITEM_CONTAINS_BLOCK

		// anything following an empty line is only part
		// of this item if it is indented 4 spaces
		// (regardless of the indentation of the beginning of the item)
		case containsBlankLine && indent < 4:
			if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
				// is the next item still a part of this definition list?
				// scan ahead past the current line and any blank lines
				// looking for a ':' that would start another definition
				next := i
				for data[next] != '\n' {
					next++
				}
				for next < len(data)-1 && data[next] == '\n' {
					next++
				}
				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
					*flags |= LIST_ITEM_END_OF_LIST
				}
			} else {
				*flags |= LIST_ITEM_END_OF_LIST
			}
			break gatherlines

		// a blank line means this should be parsed as a block
		case containsBlankLine:
			*flags |= LIST_ITEM_CONTAINS_BLOCK
		}

		containsBlankLine = false

		// add the line into the working buffer without prefix
		raw.Write(data[line+indent : i])

		line = i
	}

	rawBytes := raw.Bytes()

	// render the contents of the list item
	var cooked bytes.Buffer
	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
		// intermediate render of block item, except for definition term
		if sublist > 0 {
			p.block(&cooked, rawBytes[:sublist])
			p.block(&cooked, rawBytes[sublist:])
		} else {
			p.block(&cooked, rawBytes)
		}
	} else {
		// intermediate render of inline item
		if sublist > 0 {
			p.inline(&cooked, rawBytes[:sublist])
			p.block(&cooked, rawBytes[sublist:])
		} else {
			p.inline(&cooked, rawBytes)
		}
	}

	// render the actual list item
	cookedBytes := cooked.Bytes()
	parsedEnd := len(cookedBytes)

	// strip trailing newlines
	for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
		parsedEnd--
	}
	p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)

	return line
}
|
|
|
|
|
2011-06-01 06:07:15 +08:00
|
|
|
// render a single paragraph that has already been parsed out
|
2011-07-08 01:56:45 +08:00
|
|
|
func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
|
2011-07-05 08:56:29 +08:00
|
|
|
if len(data) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// trim leading spaces
|
2011-06-26 05:18:34 +08:00
|
|
|
beg := 0
|
2011-07-05 08:56:29 +08:00
|
|
|
for data[beg] == ' ' {
|
2011-06-26 05:18:34 +08:00
|
|
|
beg++
|
|
|
|
}
|
|
|
|
|
2011-07-05 08:56:29 +08:00
|
|
|
// trim trailing newline
|
|
|
|
end := len(data) - 1
|
|
|
|
|
|
|
|
// trim trailing spaces
|
|
|
|
for end > beg && data[end-1] == ' ' {
|
2011-06-01 06:07:15 +08:00
|
|
|
end--
|
2011-05-30 07:00:31 +08:00
|
|
|
}
|
|
|
|
|
2011-06-27 07:21:11 +08:00
|
|
|
work := func() bool {
|
2011-07-08 01:56:45 +08:00
|
|
|
p.inline(out, data[beg:end])
|
2011-06-27 07:21:11 +08:00
|
|
|
return true
|
|
|
|
}
|
2011-07-08 01:56:45 +08:00
|
|
|
p.r.Paragraph(out, work)
|
2011-06-01 06:07:15 +08:00
|
|
|
}
|
2011-05-30 07:00:31 +08:00
|
|
|
|
2011-07-08 01:56:45 +08:00
|
|
|
// paragraph parses a paragraph at the start of data, scanning line by
// line for anything that terminates it (blank line, setext underline,
// HTML block, prefix header, horizontal rule, fenced code, or — with
// the relevant extensions — a list), renders what was collected, and
// returns the number of bytes consumed.
func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
	// prev: index of 1st char of previous line
	// line: index of 1st char of current line
	// i: index of cursor/end of current line
	var prev, line, i int

	// keep going until we find something to mark the end of the paragraph
	for i < len(data) {
		// mark the beginning of the current line
		prev = line
		current := data[i:]
		line = i

		// did we find a blank line marking the end of the paragraph?
		if n := p.isEmpty(current); n > 0 {
			// is this blank line followed by a definition list item?
			if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
				if i < len(data)-1 && data[i+1] == ':' {
					return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
				}
			}

			p.renderParagraph(out, data[:i])
			return i + n
		}

		// an underline under some text marks a header, so our paragraph ended on prev line
		if i > 0 {
			if level := p.isUnderlinedHeader(current); level > 0 {
				// render the paragraph up to (not including) the header text
				p.renderParagraph(out, data[:prev])

				// ignore leading and trailing whitespace around the header text
				eol := i - 1
				for prev < eol && data[prev] == ' ' {
					prev++
				}
				for eol > prev && data[eol-1] == ' ' {
					eol--
				}

				// render the header
				// this ugly double closure avoids forcing variables onto the heap
				work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
					return func() bool {
						pp.inline(o, d)
						return true
					}
				}(out, p, data[prev:eol])

				id := ""
				if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
					id = sanitized_anchor_name.Create(string(data[prev:eol]))
				}

				p.r.Header(out, work, level, id)

				// find the end of the underline
				for data[i] != '\n' {
					i++
				}
				return i
			}
		}

		// if the next line starts a block of HTML, then the paragraph ends here
		if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
			if data[i] == '<' && p.html(out, current, false) > 0 {
				// rewind to before the HTML block
				p.renderParagraph(out, data[:i])
				return i
			}
		}

		// if there's a prefixed header or a horizontal rule after this, paragraph is over
		if p.isPrefixHeader(current) || p.isHRule(current) {
			p.renderParagraph(out, data[:i])
			return i
		}

		// if there's a fenced code block, paragraph is over
		if p.flags&EXTENSION_FENCED_CODE != 0 {
			if p.fencedCode(out, current, false) > 0 {
				p.renderParagraph(out, data[:i])
				return i
			}
		}

		// if there's a definition list item, prev line is a definition term
		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
			if p.dliPrefix(current) != 0 {
				return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
			}
		}

		// if there's a list after this, paragraph is over
		if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
			if p.uliPrefix(current) != 0 ||
				p.oliPrefix(current) != 0 ||
				p.quotePrefix(current) != 0 ||
				p.codePrefix(current) != 0 {
				p.renderParagraph(out, data[:i])
				return i
			}
		}

		// otherwise, scan to the beginning of the next line
		for data[i] != '\n' {
			i++
		}
		i++
	}

	// ran out of input: render everything collected so far
	p.renderParagraph(out, data[:i])
	return i
}
|