// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64,!gccgo,!appengine

#include "textflag.h"
// ·c40<>: PSHUFB mask rotating each 64-bit lane right by 24 bits
// (dest byte i = src byte (i+3) mod 8; equivalently rotate left by 40,
// hence the name). 16 bytes, read-only, no pointers.
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
// ·c48<>: PSHUFB mask rotating each 64-bit lane right by 16 bits
// (dest byte i = src byte (i+2) mod 8; equivalently rotate left by 48,
// hence the name). 16 bytes, read-only, no pointers.
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
// SHUFFLE permutes the qword lanes of the six state registers v2..v7
// (the diagonalization step between the two HALF_ROUNDs of a BlaMka
// round; v0/v1 stay in place). t1 and t2 are scratch: t1 is written
// before it is read; t2 enters with undefined contents, but only the
// lanes the macro writes first are consumed later. Undone by
// SHUFFLE_INV. No per-line comments: a trailing comment would swallow
// the '\' continuation of the #define.
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
	MOVO       v4, t1; \
	MOVO       v5, v4; \
	MOVO       t1, v5; \
	MOVO       v6, t1; \
	PUNPCKLQDQ v6, t2; \
	PUNPCKHQDQ v7, v6; \
	PUNPCKHQDQ t2, v6; \
	PUNPCKLQDQ v7, t2; \
	MOVO       t1, v7; \
	MOVO       v2, t1; \
	PUNPCKHQDQ t2, v7; \
	PUNPCKLQDQ v3, t2; \
	PUNPCKHQDQ t2, v2; \
	PUNPCKLQDQ t1, t2; \
	PUNPCKHQDQ t2, v3
// SHUFFLE_INV is the exact inverse lane permutation of SHUFFLE,
// restoring v2..v7 to column order after the diagonal HALF_ROUND.
// t1/t2 are scratch with the same conventions as in SHUFFLE.
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
	MOVO       v4, t1; \
	MOVO       v5, v4; \
	MOVO       t1, v5; \
	MOVO       v2, t1; \
	PUNPCKLQDQ v2, t2; \
	PUNPCKHQDQ v3, v2; \
	PUNPCKHQDQ t2, v2; \
	PUNPCKLQDQ v3, t2; \
	MOVO       t1, v3; \
	MOVO       v6, t1; \
	PUNPCKHQDQ t2, v3; \
	PUNPCKLQDQ v7, t2; \
	PUNPCKHQDQ t2, v6; \
	PUNPCKLQDQ t1, t2; \
	PUNPCKHQDQ t2, v7
// HALF_ROUND applies the four fBlaMka G-function steps to two
// independent register quartets, (v0,v2,v4,v6) and (v1,v3,v5,v7),
// each register holding two 64-bit lanes.
//
// The repeating 5-instruction pattern computes the multiplicative
// addition a = a + b + 2*lo32(a)*lo32(b):
//   MOVO a,t0; PMULULQ b,t0  -> t0 = lo32(a)*lo32(b) per qword
//   PADDQ b,a; PADDQ t0,a x2 -> a += b + 2*t0
//
// Rotations (per 64-bit lane):
//   PSHUFD $0xB1        swaps the two dwords of each qword = rotr 32
//   PSHUFB c40 / c48    byte shuffle = rotr 24 / rotr 16
//   PADDQ self; PSRLQ $63; PXOR = (x<<1)|(x>>63) = rotr 63
//
// t0 is scratch. No per-line comments: a trailing comment would
// swallow the '\' continuation of the #define.
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \
	MOVO    v0, t0;        \
	PMULULQ v2, t0;        \
	PADDQ   v2, v0;        \
	PADDQ   t0, v0;        \
	PADDQ   t0, v0;        \
	PXOR    v0, v6;        \
	PSHUFD  $0xB1, v6, v6; \
	MOVO    v4, t0;        \
	PMULULQ v6, t0;        \
	PADDQ   v6, v4;        \
	PADDQ   t0, v4;        \
	PADDQ   t0, v4;        \
	PXOR    v4, v2;        \
	PSHUFB  c40, v2;       \
	MOVO    v0, t0;        \
	PMULULQ v2, t0;        \
	PADDQ   v2, v0;        \
	PADDQ   t0, v0;        \
	PADDQ   t0, v0;        \
	PXOR    v0, v6;        \
	PSHUFB  c48, v6;       \
	MOVO    v4, t0;        \
	PMULULQ v6, t0;        \
	PADDQ   v6, v4;        \
	PADDQ   t0, v4;        \
	PADDQ   t0, v4;        \
	PXOR    v4, v2;        \
	MOVO    v2, t0;        \
	PADDQ   v2, t0;        \
	PSRLQ   $63, v2;       \
	PXOR    t0, v2;        \
	MOVO    v1, t0;        \
	PMULULQ v3, t0;        \
	PADDQ   v3, v1;        \
	PADDQ   t0, v1;        \
	PADDQ   t0, v1;        \
	PXOR    v1, v7;        \
	PSHUFD  $0xB1, v7, v7; \
	MOVO    v5, t0;        \
	PMULULQ v7, t0;        \
	PADDQ   v7, v5;        \
	PADDQ   t0, v5;        \
	PADDQ   t0, v5;        \
	PXOR    v5, v3;        \
	PSHUFB  c40, v3;       \
	MOVO    v1, t0;        \
	PMULULQ v3, t0;        \
	PADDQ   v3, v1;        \
	PADDQ   t0, v1;        \
	PADDQ   t0, v1;        \
	PXOR    v1, v7;        \
	PSHUFB  c48, v7;       \
	MOVO    v5, t0;        \
	PMULULQ v7, t0;        \
	PADDQ   v7, v5;        \
	PADDQ   t0, v5;        \
	PADDQ   t0, v5;        \
	PXOR    v5, v3;        \
	MOVO    v3, t0;        \
	PADDQ   v3, t0;        \
	PSRLQ   $63, v3;       \
	PXOR    t0, v3
// LOAD_MSG_0 loads 16 contiguous qwords (128 bytes) starting at byte
// offset 8*off into X0..X7, two qwords per register (row access).
#define LOAD_MSG_0(block, off) \
	MOVOU 8*(off+0)(block), X0;  \
	MOVOU 8*(off+2)(block), X1;  \
	MOVOU 8*(off+4)(block), X2;  \
	MOVOU 8*(off+6)(block), X3;  \
	MOVOU 8*(off+8)(block), X4;  \
	MOVOU 8*(off+10)(block), X5; \
	MOVOU 8*(off+12)(block), X6; \
	MOVOU 8*(off+14)(block), X7
// STORE_MSG_0 writes X0..X7 back to the 128 contiguous bytes at byte
// offset 8*off — the exact inverse of LOAD_MSG_0.
#define STORE_MSG_0(block, off) \
	MOVOU X0, 8*(off+0)(block);  \
	MOVOU X1, 8*(off+2)(block);  \
	MOVOU X2, 8*(off+4)(block);  \
	MOVOU X3, 8*(off+6)(block);  \
	MOVOU X4, 8*(off+8)(block);  \
	MOVOU X5, 8*(off+10)(block); \
	MOVOU X6, 8*(off+12)(block); \
	MOVOU X7, 8*(off+14)(block)
// LOAD_MSG_1 gathers eight 16-byte pairs spaced 128 bytes (16 qwords)
// apart, starting at byte offset 8*off, into X0..X7 (column access).
#define LOAD_MSG_1(block, off) \
	MOVOU 8*off+0*8(block), X0;  \
	MOVOU 8*off+16*8(block), X1; \
	MOVOU 8*off+32*8(block), X2; \
	MOVOU 8*off+48*8(block), X3; \
	MOVOU 8*off+64*8(block), X4; \
	MOVOU 8*off+80*8(block), X5; \
	MOVOU 8*off+96*8(block), X6; \
	MOVOU 8*off+112*8(block), X7
// STORE_MSG_1 scatters X0..X7 back to the eight 16-byte slots spaced
// 128 bytes apart — the exact inverse of LOAD_MSG_1.
#define STORE_MSG_1(block, off) \
	MOVOU X0, 8*off+0*8(block);  \
	MOVOU X1, 8*off+16*8(block); \
	MOVOU X2, 8*off+32*8(block); \
	MOVOU X3, 8*off+48*8(block); \
	MOVOU X4, 8*off+64*8(block); \
	MOVOU X5, 8*off+80*8(block); \
	MOVOU X6, 8*off+96*8(block); \
	MOVOU X7, 8*off+112*8(block)
// BLAMKA_ROUND_0: one full BlaMka round (column half-round, shuffle to
// diagonals, diagonal half-round, shuffle back) over 128 contiguous
// bytes of the block at qword offset off. t0/t1 are scratch XMM regs;
// c40/c48 hold the rotation masks.
#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \
	LOAD_MSG_0(block, off);                                   \
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
	SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1);                  \
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1);              \
	STORE_MSG_0(block, off)
// BLAMKA_ROUND_1: one full BlaMka round over a strided "column" slice
// of the block (eight 16-byte pieces 128 bytes apart, base 8*off).
// Same structure as BLAMKA_ROUND_0 but with the gather/scatter loads.
#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
	LOAD_MSG_1(block, off);                                   \
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
	SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1);                  \
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1);              \
	STORE_MSG_1(block, off)
  157. // func blamkaSSE4(b *block)
  158. TEXT ·blamkaSSE4(SB), 4, $0-8
  159. MOVQ b+0(FP), AX
  160. MOVOU ·c40<>(SB), X10
  161. MOVOU ·c48<>(SB), X11
  162. BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
  163. BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
  164. BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
  165. BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
  166. BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
  167. BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
  168. BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
  169. BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)
  170. BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
  171. BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
  172. BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
  173. BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
  174. BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
  175. BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
  176. BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
  177. BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
  178. RET
  179. // func mixBlocksSSE2(out, a, b, c *block)
  180. TEXT ·mixBlocksSSE2(SB), 4, $0-32
  181. MOVQ out+0(FP), DX
  182. MOVQ a+8(FP), AX
  183. MOVQ b+16(FP), BX
  184. MOVQ a+24(FP), CX
  185. MOVQ $128, BP
  186. loop:
  187. MOVOU 0(AX), X0
  188. MOVOU 0(BX), X1
  189. MOVOU 0(CX), X2
  190. PXOR X1, X0
  191. PXOR X2, X0
  192. MOVOU X0, 0(DX)
  193. ADDQ $16, AX
  194. ADDQ $16, BX
  195. ADDQ $16, CX
  196. ADDQ $16, DX
  197. SUBQ $2, BP
  198. JA loop
  199. RET
  200. // func xorBlocksSSE2(out, a, b, c *block)
  201. TEXT ·xorBlocksSSE2(SB), 4, $0-32
  202. MOVQ out+0(FP), DX
  203. MOVQ a+8(FP), AX
  204. MOVQ b+16(FP), BX
  205. MOVQ a+24(FP), CX
  206. MOVQ $128, BP
  207. loop:
  208. MOVOU 0(AX), X0
  209. MOVOU 0(BX), X1
  210. MOVOU 0(CX), X2
  211. MOVOU 0(DX), X3
  212. PXOR X1, X0
  213. PXOR X2, X0
  214. PXOR X3, X0
  215. MOVOU X0, 0(DX)
  216. ADDQ $16, AX
  217. ADDQ $16, BX
  218. ADDQ $16, CX
  219. ADDQ $16, DX
  220. SUBQ $2, BP
  221. JA loop
  222. RET