// decode_amd64.s
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"

// LZ4 block decoder.
//
// Register allocation:
//	AX  scratch
//	BX  scratch
//	CX  literal / match length
//	DX  token, then match offset
//	DI  current write position in dst
//	SI  current read position in src
//	R8  &dst[0] + len(dst) (one past the end of dst)
//	R9  &src[0] + len(src) (one past the end of src)
//	R11 &dst[0] (kept to compute the byte count returned at "end")
//	R12 short output end: R8 - 32 (margin for the shortcut's 16/18-byte copies)
//	R13 short input end:  R9 - 16 (margin for the shortcut's 16-byte reads)

// func decodeBlock(dst, src []byte) int
//
// Decompresses the LZ4 block in src into dst. Returns the number of bytes
// written to dst, -1 on corrupt input (zero match offset), or -2 when a
// read/write would fall outside src/dst.
// Using 50 bytes of stack currently (64 reserved in the frame).
TEXT ·decodeBlock(SB), NOSPLIT, $64-56
	MOVQ dst_base+0(FP), DI
	MOVQ DI, R11
	MOVQ dst_len+8(FP), R8
	ADDQ DI, R8

	MOVQ src_base+24(FP), SI
	MOVQ src_len+32(FP), R9
	ADDQ SI, R9

	// Precompute the shortcut bounds.
	// short output end
	MOVQ R8, R12
	SUBQ $32, R12
	// short input end
	MOVQ R9, R13
	SUBQ $16, R13

loop:
	// for si < len(src)
	CMPQ SI, R9
	JGE end

	// token := uint32(src[si]); si++
	MOVBQZX (SI), DX
	INCQ SI

	// lit_len := token >> 4
	MOVQ DX, CX
	SHRQ $4, CX

	// Take the slow path when the literal length needs extension bytes
	// (lit_len == 0xF), or when either buffer is too close to its end for
	// the unconditional 16/18-byte shortcut copies below.
	CMPQ CX, $0xF
	JEQ lit_len_loop_pre
	CMPQ DI, R12
	JGE lit_len_loop_pre
	CMPQ SI, R13
	JGE lit_len_loop_pre

	// A two-stage shortcut for the most common case:
	//
	// 1) If the literal length is 0..14, and there is enough space,
	// enter the shortcut and copy 16 bytes on behalf of the literals
	// (in the fast mode, only 8 bytes can be safely copied this way).
	//
	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
	// manner; but we ensure that there's enough space in the output for
	// those 18 bytes earlier, upon entering the shortcut (in other words,
	// there is a combined check for both stages).

	// Stage 1: copy 16 literal bytes unconditionally, then advance by the
	// real literal length (CX <= 14 here) — the excess is overwritten later.
	MOVOU (SI), X0
	MOVOU X0, (DI)
	ADDQ CX, DI
	ADDQ CX, SI

	// CX := match-length nibble of the token
	MOVQ DX, CX
	ANDQ $0xF, CX

	// The second stage: prepare for match copying, decode full info.
	// If it doesn't work out, the info won't be wasted.
	// DX := little-endian uint16 offset at src[si:si+2]
	MOVWQZX (SI), DX
	ADDQ $2, SI

	// AX := candidate match position = di - offset
	MOVQ DI, AX
	SUBQ DX, AX
	// NOTE(review): signed compare — presumably a defensive check that the
	// subtraction did not move past DI; confirm intent vs. unsigned wrap.
	CMPQ AX, DI
	JGT err_short_buf

	// If stage 2 cannot finish here — the match length needs extension
	// bytes, or the offset (< 8) makes the 8-byte copies below overlap —
	// jump straight to the match-length decode; the offset in DX is kept.
	CMPQ CX, $0xF
	JEQ match_len_loop_pre
	CMPQ DX, $8
	JLT match_len_loop_pre
	// the match must start at or after &dst[0]
	CMPQ AX, R11
	JLT err_short_buf

	// Stage 2: copy 18 match bytes unconditionally; the real match length
	// is CX+4 <= 18, and R12's 32-byte margin guarantees room in dst.
	// memcpy(op + 0, match + 0, 8);
	MOVQ (AX), BX
	MOVQ BX, (DI)
	// memcpy(op + 8, match + 8, 8);
	MOVQ 8(AX), BX
	MOVQ BX, 8(DI)
	// memcpy(op +16, match +16, 2);
	MOVW 16(AX), BX
	MOVW BX, 16(DI)

	// di += match_len (CX + minmatch of 4)
	LEAQ 4(DI)(CX*1), DI

	// shortcut complete, load next token
	JMP loop

lit_len_loop_pre:
	// if lit_len == 0: no literals, decode the match directly
	CMPQ CX, $0
	JEQ offset
	// if lit_len != 0xF the length is already complete
	CMPQ CX, $0xF
	JNE copy_literal

lit_len_loop:
	// accumulate extension bytes: for src[si] == 0xFF { lit_len += 0xFF }
	CMPB (SI), $0xFF
	JNE lit_len_finalise

	// bounds check src[si+1]
	LEAQ 1(SI), AX
	CMPQ AX, R9
	JGT err_short_buf

	// lit_len += 0xFF
	ADDQ $0xFF, CX
	INCQ SI
	JMP lit_len_loop

lit_len_finalise:
	// lit_len += int(src[si]); si++
	MOVBQZX (SI), AX
	ADDQ AX, CX
	INCQ SI

copy_literal:
	// bounds check: the literal run must fit in both src and dst
	LEAQ (SI)(CX*1), AX
	CMPQ AX, R9
	JGT err_short_buf
	LEAQ (DI)(CX*1), AX
	CMPQ AX, R8
	JGT err_short_buf

	// Copy strategy: a single (over-reading) 16-byte SSE move when the run
	// is short and both buffers have at least 16 bytes left; otherwise
	// fall back to runtime·memmove.
	CMPQ CX, $16
	JGT memmove_lit

	// if len(dst[di:]) < 16
	MOVQ R8, AX
	SUBQ DI, AX
	CMPQ AX, $16
	JLT memmove_lit

	// if len(src[si:]) < 16
	MOVQ R9, AX
	SUBQ SI, AX
	CMPQ AX, $16
	JLT memmove_lit

	MOVOU (SI), X0
	MOVOU X0, (DI)
	JMP finish_lit_copy

memmove_lit:
	// memmove(to, from, len)
	MOVQ DI, 0(SP)
	MOVQ SI, 8(SP)
	MOVQ CX, 16(SP)

	// spill the live registers the call clobbers
	MOVQ DI, 24(SP)
	MOVQ SI, 32(SP)
	MOVQ CX, 40(SP) // need len to inc SI, DI after
	MOVB DX, 48(SP)

	CALL runtime·memmove(SB)

	// restore registers
	MOVQ 24(SP), DI
	MOVQ 32(SP), SI
	MOVQ 40(SP), CX
	MOVB 48(SP), DX

	// recalc initial values (end pointers and shortcut bounds) from FP,
	// since the call clobbered them
	MOVQ dst_base+0(FP), R8
	MOVQ R8, R11
	ADDQ dst_len+8(FP), R8
	MOVQ src_base+24(FP), R9
	ADDQ src_len+32(FP), R9
	MOVQ R8, R12
	SUBQ $32, R12
	MOVQ R9, R13
	SUBQ $16, R13

finish_lit_copy:
	ADDQ CX, SI
	ADDQ CX, DI

	// a block may legitimately end right after its literals
	CMPQ SI, R9
	JGE end

offset:
	// CX := match-length nibble (still in the token); free DX for the offset
	MOVQ DX, CX

	// bounds check the two offset bytes
	LEAQ 2(SI), AX
	CMPQ AX, R9
	JGT err_short_buf

	// DX := int(src[si]) | int(src[si+1])<<8
	MOVWQZX (SI), DX
	ADDQ $2, SI

	// a 0 offset is invalid
	CMPQ DX, $0
	JEQ err_corrupt

	ANDB $0xF, CX

match_len_loop_pre:
	// if mlen != 0xF the length nibble is already complete
	CMPB CX, $0xF
	JNE copy_match

match_len_loop:
	// accumulate extension bytes: for src[si] == 0xFF { match_len += 0xFF }
	CMPB (SI), $0xFF
	JNE match_len_finalise

	// bounds check src[si+1]
	LEAQ 1(SI), AX
	CMPQ AX, R9
	JGT err_short_buf

	ADDQ $0xFF, CX
	INCQ SI
	JMP match_len_loop

match_len_finalise:
	// match_len += int(src[si]); si++
	MOVBQZX (SI), AX
	ADDQ AX, CX
	INCQ SI

copy_match:
	// mLen += minMatch (4)
	ADDQ $4, CX

	// check we have match_len bytes left in dst: di + match_len <= len(dst)
	LEAQ (DI)(CX*1), AX
	CMPQ AX, R8
	JGT err_short_buf

	// DX = offset, CX = match_len
	// BX = match start = &dst[di - offset]
	MOVQ DI, BX
	SUBQ DX, BX

	// the match must start inside dst: if BX < &dst[0], fail
	CMPQ BX, R11
	JLT err_short_buf

	// If the match does not overlap the bytes being written
	// (match_start + match_len <= di), a bulk copy is safe.
	LEAQ (BX)(CX*1), AX
	CMPQ DI, AX
	JGT copy_interior_match

copy_match_loop:
	// Overlapping match: copy byte by byte so bytes written earlier in
	// this very copy can be re-read as input (how LZ4 encodes short runs).
	// for ; match_len > 0; match_len-- { dst[di] = dst[i]; di++; i++ }
	MOVB (BX), AX
	MOVB AX, (DI)
	INCQ DI
	INCQ BX
	DECQ CX
	CMPQ CX, $0
	JGT copy_match_loop

	JMP loop

copy_interior_match:
	// Non-overlapping match: 16-byte SSE copy when short enough and there
	// is room past di; otherwise runtime·memmove.
	CMPQ CX, $16
	JGT memmove_match

	// if len(dst[di:]) < 16
	MOVQ R8, AX
	SUBQ DI, AX
	CMPQ AX, $16
	JLT memmove_match

	MOVOU (BX), X0
	MOVOU X0, (DI)
	ADDQ CX, DI
	JMP loop

memmove_match:
	// memmove(to, from, len)
	MOVQ DI, 0(SP)
	MOVQ BX, 8(SP)
	MOVQ CX, 16(SP)

	// spill the live registers the call clobbers
	MOVQ DI, 24(SP)
	MOVQ SI, 32(SP)
	MOVQ CX, 40(SP) // need len to inc DI after

	CALL runtime·memmove(SB)

	// restore registers
	MOVQ 24(SP), DI
	MOVQ 32(SP), SI
	MOVQ 40(SP), CX

	// recalc initial values (end pointers and shortcut bounds) from FP,
	// since the call clobbered them
	MOVQ dst_base+0(FP), R8
	MOVQ R8, R11
	ADDQ dst_len+8(FP), R8
	MOVQ src_base+24(FP), R9
	ADDQ src_len+32(FP), R9
	MOVQ R8, R12
	SUBQ $32, R12
	MOVQ R9, R13
	SUBQ $16, R13

	ADDQ CX, DI
	JMP loop

err_corrupt:
	// corrupt input (e.g. zero match offset)
	MOVQ $-1, ret+48(FP)
	RET

err_short_buf:
	// a read or write would overrun src or dst
	MOVQ $-2, ret+48(FP)
	RET

end:
	// return the number of bytes written: di - &dst[0]
	SUBQ R11, DI
	MOVQ DI, ret+48(FP)
	RET