blake2bAVX2_amd64.s

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7,amd64,!gccgo,!appengine

#include "textflag.h"

// The first and second halves of the BLAKE2b initialization vector.
DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $32

DATA ·AVX_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $32

// VPSHUFB mask rotating each 64-bit word right by 24 bits.
DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $32

// VPSHUFB mask rotating each 64-bit word right by 16 bits.
DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $32

// ROUND performs a double G round of BLAKE2b on the state in Y0-Y3:
// first on the four columns, then, after rotating the lanes with
// VPERMQ, on the four diagonals. Unfortunately the assembler does not
// accept VPERMQ, so its BYTE encoding must be used.
#define ROUND(m0, m1, m2, m3, t, c40, c48) \
	VPADDQ  m0, Y0, Y0;   \
	VPADDQ  Y1, Y0, Y0;   \
	VPXOR   Y0, Y3, Y3;   \
	VPSHUFD $-79, Y3, Y3; \ // rotate right by 32
	VPADDQ  Y3, Y2, Y2;   \
	VPXOR   Y2, Y1, Y1;   \
	VPSHUFB c40, Y1, Y1;  \ // rotate right by 24
	VPADDQ  m1, Y0, Y0;   \
	VPADDQ  Y1, Y0, Y0;   \
	VPXOR   Y0, Y3, Y3;   \
	VPSHUFB c48, Y3, Y3;  \ // rotate right by 16
	VPADDQ  Y3, Y2, Y2;   \
	VPXOR   Y2, Y1, Y1;   \
	VPADDQ  Y1, Y1, t;    \
	VPSRLQ  $63, Y1, Y1;  \
	VPXOR   t, Y1, Y1;    \ // rotate right by 63
	BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 \ // VPERMQ 0x39, Y1, Y1
	BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2
	BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 \ // VPERMQ 0x93, Y3, Y3
	VPADDQ  m2, Y0, Y0;   \
	VPADDQ  Y1, Y0, Y0;   \
	VPXOR   Y0, Y3, Y3;   \
	VPSHUFD $-79, Y3, Y3; \ // rotate right by 32
	VPADDQ  Y3, Y2, Y2;   \
	VPXOR   Y2, Y1, Y1;   \
	VPSHUFB c40, Y1, Y1;  \ // rotate right by 24
	VPADDQ  m3, Y0, Y0;   \
	VPADDQ  Y1, Y0, Y0;   \
	VPXOR   Y0, Y3, Y3;   \
	VPSHUFB c48, Y3, Y3;  \ // rotate right by 16
	VPADDQ  Y3, Y2, Y2;   \
	VPXOR   Y2, Y1, Y1;   \
	VPADDQ  Y1, Y1, t;    \
	VPSRLQ  $63, Y1, Y1;  \
	VPXOR   t, Y1, Y1;    \ // rotate right by 63
	BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 \ // VPERMQ 0x39, Y3, Y3
	BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2
	BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93   // VPERMQ 0x93, Y1, Y1
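
// For reference, each half of ROUND applies the G function from the
// BLAKE2b specification to four columns (or diagonals) in parallel.
// A rough Go sketch of scalar G (using bits.RotateLeft64 from
// math/bits; the function name and layout are illustrative, not part
// of this file):
//
//	func g(v *[16]uint64, a, b, c, d int, x, y uint64) {
//		v[a] += v[b] + x
//		v[d] = bits.RotateLeft64(v[d]^v[a], -32)
//		v[c] += v[d]
//		v[b] = bits.RotateLeft64(v[b]^v[c], -24)
//		v[a] += v[b] + y
//		v[d] = bits.RotateLeft64(v[d]^v[a], -16)
//		v[c] += v[d]
//		v[b] = bits.RotateLeft64(v[b]^v[c], -63)
//	}
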
// LOAD_MSG gathers the 16 message words selected by the indices
// i0-i15 from the 128-byte block at src into Y12-Y15, using X11 as
// scratch for the upper 128-bit lanes.
#define LOAD_MSG(src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \
	MOVQ        i0*8(src), X12;      \
	PINSRQ      $1, i1*8(src), X12;  \
	MOVQ        i2*8(src), X11;      \
	PINSRQ      $1, i3*8(src), X11;  \
	VINSERTI128 $1, X11, Y12, Y12;   \
	MOVQ        i4*8(src), X13;      \
	PINSRQ      $1, i5*8(src), X13;  \
	MOVQ        i6*8(src), X11;      \
	PINSRQ      $1, i7*8(src), X11;  \
	VINSERTI128 $1, X11, Y13, Y13;   \
	MOVQ        i8*8(src), X14;      \
	PINSRQ      $1, i9*8(src), X14;  \
	MOVQ        i10*8(src), X11;     \
	PINSRQ      $1, i11*8(src), X11; \
	VINSERTI128 $1, X11, Y14, Y14;   \
	MOVQ        i12*8(src), X15;     \
	PINSRQ      $1, i13*8(src), X15; \
	MOVQ        i14*8(src), X11;     \
	PINSRQ      $1, i15*8(src), X11; \
	VINSERTI128 $1, X11, Y15, Y15
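
// Viewing the block as 16 little-endian uint64 words m[0..15], each
// LOAD_MSG(src, i0, ..., i15) amounts to (a pseudocode sketch):
//
//	Y12 = {m[i0],  m[i1],  m[i2],  m[i3]}
//	Y13 = {m[i4],  m[i5],  m[i6],  m[i7]}
//	Y14 = {m[i8],  m[i9],  m[i10], m[i11]}
//	Y15 = {m[i12], m[i13], m[i14], m[i15]}
//
// The index lists used below are the BLAKE2b sigma permutations,
// reordered so that each register holds the words for four parallel
// G calls.
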
// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
	MOVQ h+0(FP), AX
	MOVQ c+8(FP), BX
	MOVQ flag+16(FP), CX
	MOVQ blocks_base+24(FP), SI
	MOVQ blocks_len+32(FP), DI

	// Save SP in DX and round SP up to a 32-byte boundary so that
	// VMOVDQA can be used on the frame.
	MOVQ SP, DX
	MOVQ SP, R9
	ADDQ $31, R9
	ANDQ $~31, R9
	MOVQ R9, SP

	// 0(SP): byte counter (low, high); 16(SP): flag; 24(SP): zero.
	MOVQ CX, 16(SP)
	XORQ CX, CX
	MOVQ CX, 24(SP)

	// Rotation masks in Y4-Y5, hash state h in Y8-Y9, IV in Y6-Y7.
	VMOVDQU ·AVX_c40<>(SB), Y4
	VMOVDQU ·AVX_c48<>(SB), Y5
	VMOVDQU 0(AX), Y8
	VMOVDQU 32(AX), Y9
	VMOVDQU ·AVX_iv0<>(SB), Y6
	VMOVDQU ·AVX_iv1<>(SB), Y7

	MOVQ 0(BX), R8
	MOVQ 8(BX), R9
	MOVQ R9, 8(SP)

loop:
	// Update the 128-bit byte counter, carrying into the high word
	// when the low word wraps.
	ADDQ $128, R8
	MOVQ R8, 0(SP)
	CMPQ R8, $128
	JGE  noinc
	INCQ R9
	MOVQ R9, 8(SP)

noinc:
	// Initialize the local state v0-v15 in Y0-Y3: h, then the IV with
	// the counter and flag words XORed into its second half.
	VMOVDQA Y8, Y0
	VMOVDQA Y9, Y1
	VMOVDQU Y6, Y2
	VPXOR   0(SP), Y7, Y3

	LOAD_MSG(SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15)
	VMOVDQA Y12, 32(SP)
	VMOVDQA Y13, 64(SP)
	VMOVDQA Y14, 96(SP)
	VMOVDQA Y15, 128(SP)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3)
	VMOVDQA Y12, 160(SP)
	VMOVDQA Y13, 192(SP)
	VMOVDQA Y14, 224(SP)
	VMOVDQA Y15, 256(SP)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG(SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0)
	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)

	// Rounds 11 and 12 reuse the message schedules of rounds 1 and 2,
	// spilled to the stack above.
	ROUND(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
	ROUND(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)

	// Finalize: h = h XOR v[0..7] XOR v[8..15].
	VPXOR Y0, Y8, Y8
	VPXOR Y1, Y9, Y9
	VPXOR Y2, Y8, Y8
	VPXOR Y3, Y9, Y9

	LEAQ 128(SI), SI
	SUBQ $128, DI
	JNE  loop

	// Write back the counter and the updated hash state, then restore
	// the original stack pointer.
	MOVQ    R8, 0(BX)
	MOVQ    R9, 8(BX)
	VMOVDQU Y8, 0(AX)
	VMOVDQU Y9, 32(AX)
	MOVQ    DX, SP
	RET

// func supportAVX2() bool
TEXT ·supportAVX2(SB), 4, $0-1
	MOVQ runtime·support_avx2(SB), AX
	MOVB AX, ret+0(FP)
	RET
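
// On the Go side these functions are declared in a stub file. A
// minimal sketch of those declarations (assumed; the stub file itself
// is not shown here):
//
//	//go:noescape
//	func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
//
//	func supportAVX2() bool
//
// A caller would typically test supportAVX2() once at init time and
// fall back to a pure-Go implementation when it reports false.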