token.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package html

import (
	"bytes"
	"errors"
	"io"
	"strconv"
	"strings"

	"golang.org/x/net/html/atom"
)

// A TokenType is the type of a Token.
type TokenType uint32

const (
	// ErrorToken means that an error occurred during tokenization.
	ErrorToken TokenType = iota
	// TextToken means a text node.
	TextToken
	// A StartTagToken looks like <a>.
	StartTagToken
	// An EndTagToken looks like </a>.
	EndTagToken
	// A SelfClosingTagToken tag looks like <br/>.
	SelfClosingTagToken
	// A CommentToken looks like <!--x-->.
	CommentToken
	// A DoctypeToken looks like <!DOCTYPE x>
	DoctypeToken
)

// ErrBufferExceeded means that the buffering limit was exceeded.
var ErrBufferExceeded = errors.New("max buffer exceeded")

// String returns a string representation of the TokenType.
func (t TokenType) String() string {
	switch t {
	case ErrorToken:
		return "Error"
	case TextToken:
		return "Text"
	case StartTagToken:
		return "StartTag"
	case EndTagToken:
		return "EndTag"
	case SelfClosingTagToken:
		return "SelfClosingTag"
	case CommentToken:
		return "Comment"
	case DoctypeToken:
		return "Doctype"
	}
	return "Invalid(" + strconv.Itoa(int(t)) + ")"
}

// An Attribute is an attribute namespace-key-value triple. Namespace is
// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
// does not contain escapable characters like '&', '<' or '>'), and Val is
// unescaped (it looks like "a<b" rather than "a&lt;b").
//
// Namespace is only used by the parser, not the tokenizer.
type Attribute struct {
	Namespace, Key, Val string
}

// A Token consists of a TokenType and some Data (tag name for start and end
// tags, content for text, comments and doctypes). A tag Token may also contain
// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
// zero if Data is not a known tag name.
type Token struct {
	Type     TokenType
	DataAtom atom.Atom
	Data     string
	Attr     []Attribute
}

// tagString returns a string representation of a tag Token's Data and Attr.
func (t Token) tagString() string {
	if len(t.Attr) == 0 {
		return t.Data
	}
	buf := bytes.NewBufferString(t.Data)
	for _, a := range t.Attr {
		buf.WriteByte(' ')
		buf.WriteString(a.Key)
		buf.WriteString(`="`)
		escape(buf, a.Val)
		buf.WriteByte('"')
	}
	return buf.String()
}

// String returns a string representation of the Token.
func (t Token) String() string {
	switch t.Type {
	case ErrorToken:
		return ""
	case TextToken:
		return EscapeString(t.Data)
	case StartTagToken:
		return "<" + t.tagString() + ">"
	case EndTagToken:
		return "</" + t.tagString() + ">"
	case SelfClosingTagToken:
		return "<" + t.tagString() + "/>"
	case CommentToken:
		return "<!--" + t.Data + "-->"
	case DoctypeToken:
		return "<!DOCTYPE " + t.Data + ">"
	}
	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}
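
// A minimal, illustrative usage sketch (not part of the original file):
// how Token.String renders a start tag, with attribute values re-escaped.
// It assumes only this package and the standard fmt package.
//
//	t := Token{
//		Type: StartTagToken,
//		Data: "a",
//		Attr: []Attribute{{Key: "href", Val: "a<b"}},
//	}
//	fmt.Println(t.String()) // prints: <a href="a&lt;b">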

// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
// the end is exclusive.
type span struct {
	start, end int
}

// A Tokenizer returns a stream of HTML Tokens.
type Tokenizer struct {
	// r is the source of the HTML text.
	r io.Reader
	// tt is the TokenType of the current token.
	tt TokenType
	// err is the first error encountered during tokenization. It is possible
	// for tt != Error && err != nil to hold: this means that Next returned a
	// valid token but the subsequent Next call will return an error token.
	// For example, if the HTML text input was just "plain", then the first
	// Next call would set z.err to io.EOF but return a TextToken, and all
	// subsequent Next calls would return an ErrorToken.
	// err is never reset. Once it becomes non-nil, it stays non-nil.
	err error
	// readErr is the error returned by the io.Reader r. It is separate from
	// err because it is valid for an io.Reader to return (n int, err1 error)
	// such that n > 0 && err1 != nil, and callers should always process the
	// n > 0 bytes before considering the error err1.
	readErr error
	// buf[raw.start:raw.end] holds the raw bytes of the current token.
	// buf[raw.end:] is buffered input that will yield future tokens.
	raw span
	buf []byte
	// maxBuf limits the data buffered in buf. A value of 0 means unlimited.
	maxBuf int
	// buf[data.start:data.end] holds the raw bytes of the current token's data:
	// a text token's text, a tag token's tag name, etc.
	data span
	// pendingAttr is the attribute key and value currently being tokenized.
	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
	// incremented on each call to TagAttr.
	pendingAttr   [2]span
	attr          [][2]span
	nAttrReturned int
	// rawTag is the "script" in "</script>" that closes the next token. If
	// non-empty, the subsequent call to Next will return a raw or RCDATA text
	// token: one that treats "<p>" as text instead of an element.
	// rawTag's contents are lower-cased.
	rawTag string
	// textIsRaw is whether the current text token's data is not escaped.
	textIsRaw bool
	// convertNUL is whether NUL bytes in the current token's data should
	// be converted into \ufffd replacement characters.
	convertNUL bool
	// allowCDATA is whether CDATA sections are allowed in the current context.
	allowCDATA bool
}

// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
// the text "foo". The default value is false, which means to recognize it as
// a bogus comment "<!-- [CDATA[foo]] -->" instead.
//
// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
// only if tokenizing foreign content, such as MathML and SVG. However,
// tracking foreign-contentness is difficult to do purely in the tokenizer,
// as opposed to the parser, due to HTML integration points: an <svg> element
// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
// In practice, if using the tokenizer without caring whether MathML or SVG
// CDATA is text or comments, such as tokenizing HTML to find all the anchor
// text, it is acceptable to ignore this responsibility.
func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
	z.allowCDATA = allowCDATA
}
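
// An illustrative sketch (not part of the original file) of opting in to
// CDATA handling when the caller knows it is tokenizing foreign (SVG/MathML)
// content. It assumes the standard strings package.
//
//	z := NewTokenizer(strings.NewReader("<svg><![CDATA[a<b]]></svg>"))
//	z.AllowCDATA(true)
//	// The CDATA section is now reported as a TextToken with data "a<b",
//	// rather than as a bogus comment.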

// NextIsNotRawText instructs the tokenizer that the next token should not be
// considered as 'raw text'. Some elements, such as script and title elements,
// normally require the next token after the opening tag to be 'raw text' that
// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
// an end tag token for "</title>". There are no distinct start tag or end tag
// tokens for the "<b>" and "</b>".
//
// This tokenizer implementation will generally look for raw text at the right
// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
// raw text if in foreign content: <title> generally needs raw text, but a
// <title> inside an <svg> does not. Another example is that a <textarea>
// generally needs raw text, but a <textarea> is not allowed as an immediate
// child of a <select>; in normal parsing, a <textarea> implies </select>, but
// one cannot close the implicit element when parsing a <select>'s InnerHTML.
// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
// ness is difficult to do purely in the tokenizer, as opposed to the parser.
// For strict compliance with the HTML5 tokenization algorithm, it is the
// responsibility of the user of a tokenizer to call NextIsNotRawText as
// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
// responsibility for basic usage.
//
// Note that this 'raw text' concept is different from the one offered by the
// Tokenizer.Raw method.
func (z *Tokenizer) NextIsNotRawText() {
	z.rawTag = ""
}
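
// An illustrative sketch (not part of the original file) of overriding
// raw-text mode. A caller that tracks foreign content itself might, after
// seeing a <title> start tag while inside an <svg>, ask the tokenizer to
// treat the title's contents as markup rather than raw text. Here insideSVG
// is a hypothetical flag maintained by the caller.
//
//	if tt == StartTagToken {
//		if name, _ := z.TagName(); string(name) == "title" && insideSVG {
//			z.NextIsNotRawText()
//		}
//	}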

// Err returns the error associated with the most recent ErrorToken token.
// This is typically io.EOF, meaning the end of tokenization.
func (z *Tokenizer) Err() error {
	if z.tt != ErrorToken {
		return nil
	}
	return z.err
}
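
// A minimal, illustrative sketch (not part of the original file) of the usual
// Next/Err loop, assuming the standard fmt, io, log and strings packages.
//
//	z := NewTokenizer(strings.NewReader("<p>Hello, <b>world</b>!</p>"))
//	for {
//		tt := z.Next()
//		if tt == ErrorToken {
//			if err := z.Err(); err != io.EOF {
//				log.Fatal(err) // a real tokenization error
//			}
//			break // io.EOF: the end of the input
//		}
//		t := z.Token()
//		fmt.Printf("%v: %q\n", t.Type, t.Data)
//	}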

// readByte returns the next byte from the input stream, doing a buffered read
// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte
// slice that holds all the bytes read so far for the current token.
// It sets z.err if the underlying reader returns an error.
// Pre-condition: z.err == nil.
func (z *Tokenizer) readByte() byte {
	if z.raw.end >= len(z.buf) {
		// Our buffer is exhausted and we have to read from z.r. Check if the
		// previous read resulted in an error.
		if z.readErr != nil {
			z.err = z.readErr
			return 0
		}
		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
		// allocate a new buffer before the copy.
		c := cap(z.buf)
		d := z.raw.end - z.raw.start
		var buf1 []byte
		if 2*d > c {
			buf1 = make([]byte, d, 2*c)
		} else {
			buf1 = z.buf[:d]
		}
		copy(buf1, z.buf[z.raw.start:z.raw.end])
		if x := z.raw.start; x != 0 {
			// Adjust the data/attr spans to refer to the same contents after the copy.
			z.data.start -= x
			z.data.end -= x
			z.pendingAttr[0].start -= x
			z.pendingAttr[0].end -= x
			z.pendingAttr[1].start -= x
			z.pendingAttr[1].end -= x
			for i := range z.attr {
				z.attr[i][0].start -= x
				z.attr[i][0].end -= x
				z.attr[i][1].start -= x
				z.attr[i][1].end -= x
			}
		}
		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
		// Now that we have copied the live bytes to the start of the buffer,
		// we read from z.r into the remainder.
		var n int
		n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
		if n == 0 {
			z.err = z.readErr
			return 0
		}
		z.buf = buf1[:d+n]
	}
	x := z.buf[z.raw.end]
	z.raw.end++
	if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
		z.err = ErrBufferExceeded
		return 0
	}
	return x
}

// Buffered returns a slice containing data buffered but not yet tokenized.
func (z *Tokenizer) Buffered() []byte {
	return z.buf[z.raw.end:]
}

// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
// too many times in succession.
func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
	for i := 0; i < 100; i++ {
		n, err := r.Read(b)
		if n != 0 || err != nil {
			return n, err
		}
	}
	return 0, io.ErrNoProgress
}

// skipWhiteSpace skips past any white space.
func (z *Tokenizer) skipWhiteSpace() {
	if z.err != nil {
		return
	}
	for {
		c := z.readByte()
		if z.err != nil {
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f':
			// No-op.
		default:
			z.raw.end--
			return
		}
	}
}

// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
// is typically something like "script" or "textarea".
func (z *Tokenizer) readRawOrRCDATA() {
	if z.rawTag == "script" {
		z.readScript()
		z.textIsRaw = true
		z.rawTag = ""
		return
	}
loop:
	for {
		c := z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '<' {
			continue loop
		}
		c = z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '/' {
			z.raw.end--
			continue loop
		}
		if z.readRawEndTag() || z.err != nil {
			break loop
		}
	}
	z.data.end = z.raw.end
	// A textarea's or title's RCDATA can contain escaped entities.
	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
	z.rawTag = ""
}

// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
// If it succeeds, it backs up the input position to reconsume the tag and
// returns true. Otherwise it returns false. The opening "</" has already been
// consumed.
func (z *Tokenizer) readRawEndTag() bool {
	for i := 0; i < len(z.rawTag); i++ {
		c := z.readByte()
		if z.err != nil {
			return false
		}
		if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
			z.raw.end--
			return false
		}
	}
	c := z.readByte()
	if z.err != nil {
		return false
	}
	switch c {
	case ' ', '\n', '\r', '\t', '\f', '/', '>':
		// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
		z.raw.end -= 3 + len(z.rawTag)
		return true
	}
	z.raw.end--
	return false
}

// readScript reads until the next </script> tag, following the byzantine
// rules for escaping/hiding the closing tag.
func (z *Tokenizer) readScript() {
	defer func() {
		z.data.end = z.raw.end
	}()
	var c byte

scriptData:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '<' {
		goto scriptDataLessThanSign
	}
	goto scriptData

scriptDataLessThanSign:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '/':
		goto scriptDataEndTagOpen
	case '!':
		goto scriptDataEscapeStart
	}
	z.raw.end--
	goto scriptData

scriptDataEndTagOpen:
	if z.readRawEndTag() || z.err != nil {
		return
	}
	goto scriptData

scriptDataEscapeStart:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '-' {
		goto scriptDataEscapeStartDash
	}
	z.raw.end--
	goto scriptData

scriptDataEscapeStartDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '-' {
		goto scriptDataEscapedDashDash
	}
	z.raw.end--
	goto scriptData

scriptDataEscaped:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataEscapedDash
	case '<':
		goto scriptDataEscapedLessThanSign
	}
	goto scriptDataEscaped

scriptDataEscapedDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataEscapedDashDash
	case '<':
		goto scriptDataEscapedLessThanSign
	}
	goto scriptDataEscaped

scriptDataEscapedDashDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataEscapedDashDash
	case '<':
		goto scriptDataEscapedLessThanSign
	case '>':
		goto scriptData
	}
	goto scriptDataEscaped

scriptDataEscapedLessThanSign:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '/' {
		goto scriptDataEscapedEndTagOpen
	}
	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
		goto scriptDataDoubleEscapeStart
	}
	z.raw.end--
	goto scriptData

scriptDataEscapedEndTagOpen:
	if z.readRawEndTag() || z.err != nil {
		return
	}
	goto scriptDataEscaped

scriptDataDoubleEscapeStart:
	z.raw.end--
	for i := 0; i < len("script"); i++ {
		c = z.readByte()
		if z.err != nil {
			return
		}
		if c != "script"[i] && c != "SCRIPT"[i] {
			z.raw.end--
			goto scriptDataEscaped
		}
	}
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case ' ', '\n', '\r', '\t', '\f', '/', '>':
		goto scriptDataDoubleEscaped
	}
	z.raw.end--
	goto scriptDataEscaped

scriptDataDoubleEscaped:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataDoubleEscapedDash
	case '<':
		goto scriptDataDoubleEscapedLessThanSign
	}
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataDoubleEscapedDashDash
	case '<':
		goto scriptDataDoubleEscapedLessThanSign
	}
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedDashDash:
	c = z.readByte()
	if z.err != nil {
		return
	}
	switch c {
	case '-':
		goto scriptDataDoubleEscapedDashDash
	case '<':
		goto scriptDataDoubleEscapedLessThanSign
	case '>':
		goto scriptData
	}
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedLessThanSign:
	c = z.readByte()
	if z.err != nil {
		return
	}
	if c == '/' {
		goto scriptDataDoubleEscapeEnd
	}
	z.raw.end--
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapeEnd:
	if z.readRawEndTag() {
		z.raw.end += len("</script>")
		goto scriptDataEscaped
	}
	if z.err != nil {
		return
	}
	goto scriptDataDoubleEscaped
}

// readComment reads the next comment token starting with "<!--". The opening
// "<!--" has already been consumed.
func (z *Tokenizer) readComment() {
	z.data.start = z.raw.end
	defer func() {
		if z.data.end < z.data.start {
			// It's a comment with no data, like <!-->.
			z.data.end = z.data.start
		}
	}()
	for dashCount := 2; ; {
		c := z.readByte()
		if z.err != nil {
			// Ignore up to two dashes at EOF.
			if dashCount > 2 {
				dashCount = 2
			}
			z.data.end = z.raw.end - dashCount
			return
		}
		switch c {
		case '-':
			dashCount++
			continue
		case '>':
			if dashCount >= 2 {
				z.data.end = z.raw.end - len("-->")
				return
			}
		case '!':
			if dashCount >= 2 {
				c = z.readByte()
				if z.err != nil {
					z.data.end = z.raw.end
					return
				}
				if c == '>' {
					z.data.end = z.raw.end - len("--!>")
					return
				}
			}
		}
		dashCount = 0
	}
}

// readUntilCloseAngle reads until the next ">".
func (z *Tokenizer) readUntilCloseAngle() {
	z.data.start = z.raw.end
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return
		}
		if c == '>' {
			z.data.end = z.raw.end - len(">")
			return
		}
	}
}

// readMarkupDeclaration reads the next token starting with "<!". It might be
// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
// "<!a bogus comment". The opening "<!" has already been consumed.
func (z *Tokenizer) readMarkupDeclaration() TokenType {
	z.data.start = z.raw.end
	var c [2]byte
	for i := 0; i < 2; i++ {
		c[i] = z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return CommentToken
		}
	}
	if c[0] == '-' && c[1] == '-' {
		z.readComment()
		return CommentToken
	}
	z.raw.end -= 2
	if z.readDoctype() {
		return DoctypeToken
	}
	if z.allowCDATA && z.readCDATA() {
		z.convertNUL = true
		return TextToken
	}
	// It's a bogus comment.
	z.readUntilCloseAngle()
	return CommentToken
}

// readDoctype attempts to read a doctype declaration and returns true if
// successful. The opening "<!" has already been consumed.
func (z *Tokenizer) readDoctype() bool {
	const s = "DOCTYPE"
	for i := 0; i < len(s); i++ {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return false
		}
		if c != s[i] && c != s[i]+('a'-'A') {
			// Back up to read the fragment of "DOCTYPE" again.
			z.raw.end = z.data.start
			return false
		}
	}
	if z.skipWhiteSpace(); z.err != nil {
		z.data.start = z.raw.end
		z.data.end = z.raw.end
		return true
	}
	z.readUntilCloseAngle()
	return true
}

// readCDATA attempts to read a CDATA section and returns true if
// successful. The opening "<!" has already been consumed.
func (z *Tokenizer) readCDATA() bool {
	const s = "[CDATA["
	for i := 0; i < len(s); i++ {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return false
		}
		if c != s[i] {
			// Back up to read the fragment of "[CDATA[" again.
			z.raw.end = z.data.start
			return false
		}
	}
	z.data.start = z.raw.end
	brackets := 0
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return true
		}
		switch c {
		case ']':
			brackets++
		case '>':
			if brackets >= 2 {
				z.data.end = z.raw.end - len("]]>")
				return true
			}
			brackets = 0
		default:
			brackets = 0
		}
	}
}

// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
// case-insensitively matches any element of ss.
func (z *Tokenizer) startTagIn(ss ...string) bool {
loop:
	for _, s := range ss {
		if z.data.end-z.data.start != len(s) {
			continue loop
		}
		for i := 0; i < len(s); i++ {
			c := z.buf[z.data.start+i]
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			}
			if c != s[i] {
				continue loop
			}
		}
		return true
	}
	return false
}

// readStartTag reads the next start tag token. The opening "<a" has already
// been consumed, where 'a' means anything in [A-Za-z].
func (z *Tokenizer) readStartTag() TokenType {
	z.readTag(true)
	if z.err != nil {
		return ErrorToken
	}
	// Several tags flag the tokenizer's next token as raw.
	c, raw := z.buf[z.data.start], false
	if 'A' <= c && c <= 'Z' {
		c += 'a' - 'A'
	}
	switch c {
	case 'i':
		raw = z.startTagIn("iframe")
	case 'n':
		raw = z.startTagIn("noembed", "noframes", "noscript")
	case 'p':
		raw = z.startTagIn("plaintext")
	case 's':
		raw = z.startTagIn("script", "style")
	case 't':
		raw = z.startTagIn("textarea", "title")
	case 'x':
		raw = z.startTagIn("xmp")
	}
	if raw {
		z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
	}
	// Look for a self-closing token like "<br/>".
	if z.err == nil && z.buf[z.raw.end-2] == '/' {
		return SelfClosingTagToken
	}
	return StartTagToken
}

// readTag reads the next tag token and its attributes. If saveAttr, those
// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
// The opening "<a" or "</a" has already been consumed, where 'a' means anything
// in [A-Za-z].
func (z *Tokenizer) readTag(saveAttr bool) {
	z.attr = z.attr[:0]
	z.nAttrReturned = 0
	// Read the tag name and attribute key/value pairs.
	z.readTagName()
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	for {
		c := z.readByte()
		if z.err != nil || c == '>' {
			break
		}
		z.raw.end--
		z.readTagAttrKey()
		z.readTagAttrVal()
		// Save pendingAttr if saveAttr and that attribute has a non-empty key.
		if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
			z.attr = append(z.attr, z.pendingAttr)
		}
		if z.skipWhiteSpace(); z.err != nil {
			break
		}
	}
}

// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
// is positioned such that the first byte of the tag name (the "d" in "<div")
// has already been consumed.
func (z *Tokenizer) readTagName() {
	z.data.start = z.raw.end - 1
	for {
		c := z.readByte()
		if z.err != nil {
			z.data.end = z.raw.end
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f':
			z.data.end = z.raw.end - 1
			return
		case '/', '>':
			z.raw.end--
			z.data.end = z.raw.end
			return
		}
	}
}

// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
// Precondition: z.err == nil.
func (z *Tokenizer) readTagAttrKey() {
	z.pendingAttr[0].start = z.raw.end
	for {
		c := z.readByte()
		if z.err != nil {
			z.pendingAttr[0].end = z.raw.end
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f', '/':
			z.pendingAttr[0].end = z.raw.end - 1
			return
		case '=', '>':
			z.raw.end--
			z.pendingAttr[0].end = z.raw.end
			return
		}
	}
}

// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
func (z *Tokenizer) readTagAttrVal() {
	z.pendingAttr[1].start = z.raw.end
	z.pendingAttr[1].end = z.raw.end
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	c := z.readByte()
	if z.err != nil {
		return
	}
	if c != '=' {
		z.raw.end--
		return
	}
	if z.skipWhiteSpace(); z.err != nil {
		return
	}
	quote := z.readByte()
	if z.err != nil {
		return
	}
	switch quote {
	case '>':
		z.raw.end--
		return
	case '\'', '"':
		z.pendingAttr[1].start = z.raw.end
		for {
			c := z.readByte()
			if z.err != nil {
				z.pendingAttr[1].end = z.raw.end
				return
			}
			if c == quote {
				z.pendingAttr[1].end = z.raw.end - 1
				return
			}
		}
	default:
		z.pendingAttr[1].start = z.raw.end - 1
		for {
			c := z.readByte()
			if z.err != nil {
				z.pendingAttr[1].end = z.raw.end
				return
			}
			switch c {
			case ' ', '\n', '\r', '\t', '\f':
				z.pendingAttr[1].end = z.raw.end - 1
				return
			case '>':
				z.raw.end--
				z.pendingAttr[1].end = z.raw.end
				return
			}
		}
	}
}

// Next scans the next token and returns its type.
func (z *Tokenizer) Next() TokenType {
	z.raw.start = z.raw.end
	z.data.start = z.raw.end
	z.data.end = z.raw.end
	if z.err != nil {
		z.tt = ErrorToken
		return z.tt
	}
	if z.rawTag != "" {
		if z.rawTag == "plaintext" {
			// Read everything up to EOF.
			for z.err == nil {
				z.readByte()
			}
			z.data.end = z.raw.end
			z.textIsRaw = true
		} else {
			z.readRawOrRCDATA()
		}
		if z.data.end > z.data.start {
			z.tt = TextToken
			z.convertNUL = true
			return z.tt
		}
	}
	z.textIsRaw = false
	z.convertNUL = false

loop:
	for {
		c := z.readByte()
		if z.err != nil {
			break loop
		}
		if c != '<' {
			continue loop
		}

		// Check if the '<' we have just read is part of a tag, comment
		// or doctype. If not, it's part of the accumulated text token.
		c = z.readByte()
		if z.err != nil {
			break loop
		}
		var tokenType TokenType
		switch {
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
			tokenType = StartTagToken
		case c == '/':
			tokenType = EndTagToken
		case c == '!' || c == '?':
			// We use CommentToken to mean any of "<!--actual comments-->",
			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
			tokenType = CommentToken
		default:
			// Reconsume the current character.
			z.raw.end--
			continue
		}

		// We have a non-text token, but we might have accumulated some text
		// before that. If so, we return the text first, and return the non-
		// text token on the subsequent call to Next.
		if x := z.raw.end - len("<a"); z.raw.start < x {
			z.raw.end = x
			z.data.end = x
			z.tt = TextToken
			return z.tt
		}
		switch tokenType {
		case StartTagToken:
			z.tt = z.readStartTag()
			return z.tt
		case EndTagToken:
			c = z.readByte()
			if z.err != nil {
				break loop
			}
			if c == '>' {
				// "</>" does not generate a token at all. Generate an empty comment
				// to allow passthrough clients to pick up the data using Raw.
				// Reset the tokenizer state and start again.
				z.tt = CommentToken
				return z.tt
			}
			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
				z.readTag(false)
				if z.err != nil {
					z.tt = ErrorToken
				} else {
					z.tt = EndTagToken
				}
				return z.tt
			}
			z.raw.end--
			z.readUntilCloseAngle()
			z.tt = CommentToken
			return z.tt
		case CommentToken:
			if c == '!' {
				z.tt = z.readMarkupDeclaration()
				return z.tt
			}
			z.raw.end--
			z.readUntilCloseAngle()
			z.tt = CommentToken
			return z.tt
		}
	}
	if z.raw.start < z.raw.end {
		z.data.end = z.raw.end
		z.tt = TextToken
		return z.tt
	}
	z.tt = ErrorToken
	return z.tt
}

// Raw returns the unmodified text of the current token. Calling Next, Token,
// Text, TagName or TagAttr may change the contents of the returned slice.
//
// The token stream's raw bytes partition the byte stream (up until an
// ErrorToken). There are no overlaps or gaps between two consecutive token's
// raw bytes. One implication is that the byte offset of the current token is
// the sum of the lengths of all previous tokens' raw bytes.
func (z *Tokenizer) Raw() []byte {
	return z.buf[z.raw.start:z.raw.end]
}
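
// An illustrative sketch (not part of the original file) of the byte-offset
// property described above: because consecutive tokens' raw bytes partition
// the input, a caller can recover each token's starting offset by summing the
// lengths of the previous tokens' Raw slices. Assumes the standard fmt package.
//
//	offset := 0
//	for z.Next() != ErrorToken {
//		raw := z.Raw()
//		fmt.Printf("token at byte offset %d: %q\n", offset, raw)
//		offset += len(raw)
//	}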

// convertNewlines converts "\r" and "\r\n" in s to "\n".
// The conversion happens in place, but the resulting slice may be shorter.
func convertNewlines(s []byte) []byte {
	for i, c := range s {
		if c != '\r' {
			continue
		}

		src := i + 1
		if src >= len(s) || s[src] != '\n' {
			s[i] = '\n'
			continue
		}

		dst := i
		for src < len(s) {
			if s[src] == '\r' {
				if src+1 < len(s) && s[src+1] == '\n' {
					src++
				}
				s[dst] = '\n'
			} else {
				s[dst] = s[src]
			}
			src++
			dst++
		}
		return s[:dst]
	}
	return s
}

var (
	nul         = []byte("\x00")
	replacement = []byte("\ufffd")
)

// Text returns the unescaped text of a text, comment or doctype token. The
// contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) Text() []byte {
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		s := z.buf[z.data.start:z.data.end]
		z.data.start = z.raw.end
		z.data.end = z.raw.end
		s = convertNewlines(s)
		if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
			s = bytes.Replace(s, nul, replacement, -1)
		}
		if !z.textIsRaw {
			s = unescape(s, false)
		}
		return s
	}
	return nil
}

// TagName returns the lower-cased name of a tag token (the `img` out of
// `<IMG SRC="foo">`) and whether the tag has attributes.
// The contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
	if z.data.start < z.data.end {
		switch z.tt {
		case StartTagToken, EndTagToken, SelfClosingTagToken:
			s := z.buf[z.data.start:z.data.end]
			z.data.start = z.raw.end
			z.data.end = z.raw.end
			return lower(s), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, false
}

// TagAttr returns the lower-cased key and unescaped value of the next unparsed
// attribute for the current tag token and whether there are more attributes.
// The contents of the returned slices may change on the next call to Next.
func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
	if z.nAttrReturned < len(z.attr) {
		switch z.tt {
		case StartTagToken, SelfClosingTagToken:
			x := z.attr[z.nAttrReturned]
			z.nAttrReturned++
			key = z.buf[x[0].start:x[0].end]
			val = z.buf[x[1].start:x[1].end]
			return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, nil, false
}
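
// An illustrative sketch (not part of the original file) of the lower-level,
// allocation-light API: rather than materializing a Token, a caller can
// inspect tag names and attributes in place. Here the sketch collects href
// values from anchor start tags, assuming the standard fmt package.
//
//	for {
//		tt := z.Next()
//		if tt == ErrorToken {
//			break
//		}
//		if tt != StartTagToken && tt != SelfClosingTagToken {
//			continue
//		}
//		name, hasAttr := z.TagName()
//		if string(name) != "a" {
//			continue
//		}
//		for hasAttr {
//			var key, val []byte
//			key, val, hasAttr = z.TagAttr()
//			if string(key) == "href" {
//				fmt.Printf("href: %s\n", val)
//			}
//		}
//	}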

// Token returns the current Token. The result's Data and Attr values remain
// valid after subsequent Next calls.
func (z *Tokenizer) Token() Token {
	t := Token{Type: z.tt}
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		t.Data = string(z.Text())
	case StartTagToken, SelfClosingTagToken, EndTagToken:
		name, moreAttr := z.TagName()
		for moreAttr {
			var key, val []byte
			key, val, moreAttr = z.TagAttr()
			t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
		}
		if a := atom.Lookup(name); a != 0 {
			t.DataAtom, t.Data = a, a.String()
		} else {
			t.DataAtom, t.Data = 0, string(name)
		}
	}
	return t
}
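
// An illustrative sketch (not part of the original file) of the higher-level
// Token API, which copies data out of the tokenizer's buffer so it remains
// valid after further Next calls. Here the sketch collects the document's
// text nodes.
//
//	var texts []string
//	for z.Next() != ErrorToken {
//		if t := z.Token(); t.Type == TextToken {
//			texts = append(texts, t.Data)
//		}
//	}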

// SetMaxBuf sets a limit on the amount of data buffered during tokenization.
// A value of 0 means unlimited.
func (z *Tokenizer) SetMaxBuf(n int) {
	z.maxBuf = n
}
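
// An illustrative sketch (not part of the original file) of bounding memory
// use when tokenizing untrusted input. If a single token would exceed the
// limit, Next reports an ErrorToken and Err returns ErrBufferExceeded.
// untrustedReader is a hypothetical io.Reader supplied by the caller.
//
//	z := NewTokenizer(untrustedReader)
//	z.SetMaxBuf(1 << 20) // cap buffering at roughly 1 MiB per token
//	for z.Next() != ErrorToken {
//		// process tokens as usual
//	}
//	if err := z.Err(); err == ErrBufferExceeded {
//		// the input contained a token larger than the limit
//	}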

// NewTokenizer returns a new HTML Tokenizer for the given Reader.
// The input is assumed to be UTF-8 encoded.
func NewTokenizer(r io.Reader) *Tokenizer {
	return NewTokenizerFragment(r, "")
}

// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
// tokenizing an existing element's InnerHTML fragment. contextTag is that
// element's tag, such as "div" or "iframe".
//
// For example, how the InnerHTML "a<b" is tokenized depends on whether it is
// for a <p> tag or a <script> tag.
//
// The input is assumed to be UTF-8 encoded.
func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
	z := &Tokenizer{
		r:   r,
		buf: make([]byte, 0, 4096),
	}
	if contextTag != "" {
		switch s := strings.ToLower(contextTag); s {
		case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
			z.rawTag = s
		}
	}
	return z
}
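
// An illustrative sketch (not part of the original file) of the fragment
// constructor: the same bytes tokenize differently depending on the context
// tag. Inside a <div> the "<b>" is a start tag, while inside a <script> the
// whole fragment is raw text. Assumes the standard strings package.
//
//	div := NewTokenizerFragment(strings.NewReader("a<b>c"), "div")
//	div.Next() // TextToken "a"
//	div.Next() // StartTagToken "b"
//
//	script := NewTokenizerFragment(strings.NewReader("a<b>c"), "script")
//	script.Next() // TextToken "a<b>c" (raw text up to a closing </script> or EOF)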