// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

import "fmt"

const (
	dFastLongTableBits = 17                      // Bits used in the long match table
	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.

	dFastShortTableBits = tableBits                // Bits used in the short match table
	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
)
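
// doubleFastEncoder extends fastEncoder with a second table indexed by
// 8-byte hashes (the long table). Long-table candidates are checked before
// the 5-byte short-table candidates, so longer matches win when both hit.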
type doubleFastEncoder struct {
	fastEncoder
	longTable [dFastLongTableSize]tableEntry
}

// Encode mimics the functionality in zstd_dfast.c.
func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (2).
		inputMargin            = 8 + 2
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.longTable[:] {
				e.longTable[i] = tableEntry{}
			}
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
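		// Entries that have fallen out of the match window (below minOff)
		// can never match again and are zeroed; the rest are rebased so
		// their offsets stay valid relative to the new e.cur.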
		for i := range e.table[:] {
			v := e.table[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.table[i].offset = v
		}
		for i := range e.longTable[:] {
			v := e.longTable[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.longTable[i].offset = v
		}
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 1.
	const stepSize = 1

	const kSearchStrength = 8
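
	// The main loop advances by stepSize plus a term that grows with the
	// distance from nextEmit, so long stretches without matches are skipped
	// progressively faster:
	//
	//	s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
	//
	// With kSearchStrength == 8, the first 128 unmatched bytes advance one
	// byte at a time, the next 128 two bytes at a time, and so on.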
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])
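	// In this encoder's sequence representation, offset values 1-3 are
	// repeat codes referring to the recent offsets; real offsets are stored
	// with +3 added (see seq.offset = uint32(s-t) + 3 below).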

	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debug {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		var t int32
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2

		for {
			if debugAsserts && canRepeat && offset1 == 0 {
				panic("offset0 was 0")
			}

			nextHashS := hash5(cv, dFastShortTableBits)
			nextHashL := hash8(cv, dFastLongTableBits)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

			const repOff = 1
			repIndex := s - offset1 + repOff
			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry
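
			// Before consulting the candidates, speculatively check for a
			// repeat of the most recent offset one byte ahead (repOff);
			// repeat matches are the cheapest sequences to emit.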
			if canRepeat {
				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
					// Consider history as well.
					var seq seq
					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

					seq.matchLen = uint32(length - zstdMinMatch)

					// We might be able to match backwards.
					// Extend as long as we can.
					start := s + repOff
					// We end the search early, so we don't risk 0 literals
					// and have to do special offset treatment.
					startLimit := nextEmit + 1

					tMin := s - e.maxMatchOff
					if tMin < 0 {
						tMin = 0
					}
					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
						repIndex--
						start--
						seq.matchLen++
					}
					addLiterals(&seq, start)

					// rep 0
					seq.offset = 1
					if debugSequences {
						println("repeat sequence", seq, "next s:", s)
					}
					blk.sequences = append(blk.sequences, seq)
					s += length + repOff
					nextEmit = s
					if s >= sLimit {
						if debug {
							println("repeat ended", s, length)
						}
						break encodeLoop
					}
					cv = load6432(src, s)
					continue
				}
			}
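
			// The long candidate is checked before the short one; either
			// only counts as a match if it is inside the match window
			// (within e.maxMatchOff of s) and its stored 4-byte value
			// matches the current input.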
			// Find the offsets of our two matches.
			coffsetL := s - (candidateL.offset - e.cur)
			coffsetS := s - (candidateS.offset - e.cur)

			// Check if we have a long match.
			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
				// Found a long match, likely at least 8 bytes.
				// Reference encoder checks all 8 bytes, we only check 4,
				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
				t = candidateL.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t > e.maxMatchOff")
				}
				if debugMatches {
					println("long match")
				}
				break
			}

			// Check if we have a short match.
			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
				// Found a regular match.
				// See if we can find a long match at s+1.
				const checkAt = 1
				cv := load6432(src, s+checkAt)
				nextHashL = hash8(cv, dFastLongTableBits)
				candidateL = e.longTable[nextHashL]
				coffsetL = s - (candidateL.offset - e.cur) + checkAt

				// We can store it, since we have at least a 4 byte match.
				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
					// Found a long match, likely at least 8 bytes.
					// Reference encoder checks all 8 bytes, we only check 4,
					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
					t = candidateL.offset - e.cur
					s += checkAt
					if debugMatches {
						println("long match (after short)")
					}
					break
				}

				t = candidateS.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t > e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				if debugMatches {
					println("short match")
				}
				break
			}

			// No match found, move forward in input.
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}

		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && canRepeat && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the 4-byte match as long as possible.
		l := e.matchlen(s+4, t+4, src) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index match start+1 (long) and start+2 (short)
		index0 := s - l + 1
		// Index match end-2 (long) and end-1 (short)
		index1 := s - 2
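		// Hashing both ends of the match keeps the tables populated for
		// positions inside it without hashing every byte in between.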
		cv0 := load6432(src, index0)
		cv1 := load6432(src, index1)
		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
		cv0 >>= 8
		cv1 >>= 8
		te0.offset++
		te1.offset++
		te0.val = uint32(cv0)
		te1.val = uint32(cv1)
		e.table[hash5(cv0, dFastShortTableBits)] = te0
		e.table[hash5(cv1, dFastShortTableBits)] = te1

		cv = load6432(src, s)

		if !canRepeat {
			continue
		}
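
		// Speculatively test offset 2 at the new position. A hit lets us
		// emit another sequence with litLen == 0 and seq.offset == 1
		// (repeat) and swap the recent offsets, with no table search.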
		// Check offset 2
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			nextHashS := hash5(cv, dFastShortTableBits)
			nextHashL := hash8(cv, dFastLongTableBits)

			// We have at least a 4 byte match.
			// No need to check backwards. We come straight from a match.
			l := 4 + e.matchlen(s+4, o2+4, src)

			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	if debug {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}

// EncodeNoHist will encode a block with no history and no following blocks.
// The most notable difference is that src will not be copied for history,
// and we do not need to check for max match length.
func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (2).
		inputMargin            = 8 + 2
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	if e.cur >= bufferReset {
		for i := range e.table[:] {
			e.table[i] = tableEntry{}
		}
		for i := range e.longTable[:] {
			e.longTable[i] = tableEntry{}
		}
		e.cur = e.maxMatchOff
	}

	s := int32(0)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 1.
	const stepSize = 1

	const kSearchStrength = 8

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debug {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		var t int32
		for {
			nextHashS := hash5(cv, dFastShortTableBits)
			nextHashL := hash8(cv, dFastLongTableBits)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

			const repOff = 1
			repIndex := s - offset1 + repOff
			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry

			if len(blk.sequences) > 2 {
				if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
					// Consider history as well.
					var seq seq
					//length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
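					// matchLen compares the slices directly with no
					// maxMatchLength cap; as the function comment notes,
					// the cap is not needed without history.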
					length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))

					seq.matchLen = uint32(length - zstdMinMatch)

					// We might be able to match backwards.
					// Extend as long as we can.
					start := s + repOff
					// We end the search early, so we don't risk 0 literals
					// and have to do special offset treatment.
					startLimit := nextEmit + 1

					tMin := s - e.maxMatchOff
					if tMin < 0 {
						tMin = 0
					}
					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
						repIndex--
						start--
						seq.matchLen++
					}
					addLiterals(&seq, start)

					// rep 0
					seq.offset = 1
					if debugSequences {
						println("repeat sequence", seq, "next s:", s)
					}
					blk.sequences = append(blk.sequences, seq)
					s += length + repOff
					nextEmit = s
					if s >= sLimit {
						if debug {
							println("repeat ended", s, length)
						}
						break encodeLoop
					}
					cv = load6432(src, s)
					continue
				}
			}
			// Find the offsets of our two matches.
			coffsetL := s - (candidateL.offset - e.cur)
			coffsetS := s - (candidateS.offset - e.cur)

			// Check if we have a long match.
			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
				// Found a long match, likely at least 8 bytes.
				// Reference encoder checks all 8 bytes, we only check 4,
				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
				t = candidateL.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t > e.maxMatchOff")
				}
				if debugMatches {
					println("long match")
				}
				break
			}

			// Check if we have a short match.
			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
				// Found a regular match.
				// See if we can find a long match at s+1.
				const checkAt = 1
				cv := load6432(src, s+checkAt)
				nextHashL = hash8(cv, dFastLongTableBits)
				candidateL = e.longTable[nextHashL]
				coffsetL = s - (candidateL.offset - e.cur) + checkAt

				// We can store it, since we have at least a 4 byte match.
				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
					// Found a long match, likely at least 8 bytes.
					// Reference encoder checks all 8 bytes, we only check 4,
					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
					t = candidateL.offset - e.cur
					s += checkAt
					if debugMatches {
						println("long match (after short)")
					}
					break
				}

				t = candidateS.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t > e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				if debugMatches {
					println("short match")
				}
				break
			}

			// No match found, move forward in input.
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}

		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		// Extend the 4-byte match as long as possible.
		//l := e.matchlen(s+4, t+4, src) + 4
		l := int32(matchLen(src[s+4:], src[t+4:])) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index match start+1 (long) and start+2 (short)
		index0 := s - l + 1
		// Index match end-2 (long) and end-1 (short)
		index1 := s - 2

		cv0 := load6432(src, index0)
		cv1 := load6432(src, index1)
		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
		e.longTable[hash8(cv0, dFastLongTableBits)] = te0
		e.longTable[hash8(cv1, dFastLongTableBits)] = te1
		cv0 >>= 8
		cv1 >>= 8
		te0.offset++
		te1.offset++
		te0.val = uint32(cv0)
		te1.val = uint32(cv1)
		e.table[hash5(cv0, dFastShortTableBits)] = te0
		e.table[hash5(cv1, dFastShortTableBits)] = te1

		cv = load6432(src, s)

		if len(blk.sequences) <= 2 {
			continue
		}
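
		// Offset-2 speculation, as in Encode. The len(blk.sequences) > 2
		// guard above plays the role of canRepeat.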
		// Check offset 2
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			nextHashS := hash5(cv, dFastShortTableBits)
			nextHashL := hash8(cv, dFastLongTableBits)

			// We have at least a 4 byte match.
			// No need to check backwards. We come straight from a match.
			//l := 4 + e.matchlen(s+4, o2+4, src)
			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))

			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	if debug {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}