// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7,amd64,!gccgo,!appengine

#include "textflag.h"

DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32

DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
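
// Note on the constants above: ·AVX2_iv0/·AVX2_iv1 (and their ·AVX_iv0..·AVX_iv3
// halves) hold the eight 64-bit BLAKE2b initialization vector words. The c40 and
// c48 constants are VPSHUFB byte-shuffle masks: moving each byte within an 8-byte
// lane down by three (resp. two) positions rotates every 64-bit word right by 24
// (resp. 16) bits, i.e. left by 40 and 48 bits, hence the names.
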
#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
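
// ROUND_AVX2 performs one full BLAKE2b round on the 4x4 state matrix held in
// Y0-Y3, one row per register. The column step runs the four G functions in
// parallel across the 64-bit lanes; the hand-encoded VPERMQ macros above
// (emitted as raw bytes, presumably because the assembler lacked these VEX
// instructions when the file was written) then rotate rows 1-3 so the
// diagonals line up as columns, the diagonal step runs, and the inverse
// permutation restores row order. Rotations use the cheapest instruction
// available: VPSHUFD $-79 for rotate-right-32, VPSHUFB with the c40/c48
// masks for 24 and 16, and add+shift+xor for 63.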
#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
	VPADDQ m0, Y0, Y0; \
	VPADDQ Y1, Y0, Y0; \
	VPXOR Y0, Y3, Y3; \
	VPSHUFD $-79, Y3, Y3; \
	VPADDQ Y3, Y2, Y2; \
	VPXOR Y2, Y1, Y1; \
	VPSHUFB c40, Y1, Y1; \
	VPADDQ m1, Y0, Y0; \
	VPADDQ Y1, Y0, Y0; \
	VPXOR Y0, Y3, Y3; \
	VPSHUFB c48, Y3, Y3; \
	VPADDQ Y3, Y2, Y2; \
	VPXOR Y2, Y1, Y1; \
	VPADDQ Y1, Y1, t; \
	VPSRLQ $63, Y1, Y1; \
	VPXOR t, Y1, Y1; \
	VPERMQ_0x39_Y1_Y1; \
	VPERMQ_0x4E_Y2_Y2; \
	VPERMQ_0x93_Y3_Y3; \
	VPADDQ m2, Y0, Y0; \
	VPADDQ Y1, Y0, Y0; \
	VPXOR Y0, Y3, Y3; \
	VPSHUFD $-79, Y3, Y3; \
	VPADDQ Y3, Y2, Y2; \
	VPXOR Y2, Y1, Y1; \
	VPSHUFB c40, Y1, Y1; \
	VPADDQ m3, Y0, Y0; \
	VPADDQ Y1, Y0, Y0; \
	VPXOR Y0, Y3, Y3; \
	VPSHUFB c48, Y3, Y3; \
	VPADDQ Y3, Y2, Y2; \
	VPXOR Y2, Y1, Y1; \
	VPADDQ Y1, Y1, t; \
	VPSRLQ $63, Y1, Y1; \
	VPXOR t, Y1, Y1; \
	VPERMQ_0x39_Y3_Y3; \
	VPERMQ_0x4E_Y2_Y2; \
	VPERMQ_0x93_Y1_Y1

#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E

#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n

#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01

#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01

#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
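
// The byte macros above hand-assemble VEX encodings the assembler apparently
// did not accept symbolically when this file was written: VMOVQ_SI_Xk(n) is
// VMOVQ n(SI), Xk, and VPINSRQ_1_SI_Xk(n) is VPINSRQ $1, n(SI), Xk, Xk. The
// _0 variants use the displacement-free ModRM form for offset 0, which is why
// the parameterized versions must never be invoked with index 0. VMOVQ_R8_X15
// and VPINSRQ_1_R9_X15 move the counter words in R8/R9 into the two halves
// of X15.
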
// load msg: Y12 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
	VMOVQ_SI_X12(i0*8); \
	VMOVQ_SI_X11(i2*8); \
	VPINSRQ_1_SI_X12(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y12, Y12

// load msg: Y13 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
	VMOVQ_SI_X13(i0*8); \
	VMOVQ_SI_X11(i2*8); \
	VPINSRQ_1_SI_X13(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y13, Y13

// load msg: Y14 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
	VMOVQ_SI_X14(i0*8); \
	VMOVQ_SI_X11(i2*8); \
	VPINSRQ_1_SI_X14(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y14, Y14

// load msg: Y15 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
	VMOVQ_SI_X15(i0*8); \
	VMOVQ_SI_X11(i2*8); \
	VPINSRQ_1_SI_X15(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y15, Y15
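
// Each loader above gathers four 64-bit message words m[i0..i3] from the
// block at SI into one YMM register: two VMOVQ/VPINSRQ pairs build the low
// and high XMM halves, and VINSERTI128 merges them.
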
#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
	VMOVQ_SI_X12_0; \
	VMOVQ_SI_X11(4*8); \
	VPINSRQ_1_SI_X12(2*8); \
	VPINSRQ_1_SI_X11(6*8); \
	VINSERTI128 $1, X11, Y12, Y12; \
	LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
	LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
	LOAD_MSG_AVX2_Y15(9, 11, 13, 15)

#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
	LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
	LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
	VMOVQ_SI_X11(11*8); \
	VPSHUFD $0x4E, 0*8(SI), X14; \
	VPINSRQ_1_SI_X11(5*8); \
	VINSERTI128 $1, X11, Y14, Y14; \
	LOAD_MSG_AVX2_Y15(12, 2, 7, 3)

#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
	VMOVQ_SI_X11(5*8); \
	VMOVDQU 11*8(SI), X12; \
	VPINSRQ_1_SI_X11(15*8); \
	VINSERTI128 $1, X11, Y12, Y12; \
	VMOVQ_SI_X13(8*8); \
	VMOVQ_SI_X11(2*8); \
	VPINSRQ_1_SI_X13_0; \
	VPINSRQ_1_SI_X11(13*8); \
	VINSERTI128 $1, X11, Y13, Y13; \
	LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
	LOAD_MSG_AVX2_Y15(14, 6, 1, 4)

#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
	LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
	LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
	LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
	VMOVQ_SI_X15(6*8); \
	VMOVQ_SI_X11_0; \
	VPINSRQ_1_SI_X15(10*8); \
	VPINSRQ_1_SI_X11(8*8); \
	VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
	LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
	VMOVQ_SI_X13_0; \
	VMOVQ_SI_X11(4*8); \
	VPINSRQ_1_SI_X13(7*8); \
	VPINSRQ_1_SI_X11(15*8); \
	VINSERTI128 $1, X11, Y13, Y13; \
	LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
	LOAD_MSG_AVX2_Y15(1, 12, 8, 13)

#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
	VMOVQ_SI_X12(2*8); \
	VMOVQ_SI_X11_0; \
	VPINSRQ_1_SI_X12(6*8); \
	VPINSRQ_1_SI_X11(8*8); \
	VINSERTI128 $1, X11, Y12, Y12; \
	LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
	LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
	LOAD_MSG_AVX2_Y15(13, 5, 14, 9)

#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
	LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
	LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
	VMOVQ_SI_X14_0; \
	VPSHUFD $0x4E, 8*8(SI), X11; \
	VPINSRQ_1_SI_X14(6*8); \
	VINSERTI128 $1, X11, Y14, Y14; \
	LOAD_MSG_AVX2_Y15(7, 3, 2, 11)

#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
	LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
	LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
	LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
	VMOVQ_SI_X15_0; \
	VMOVQ_SI_X11(6*8); \
	VPINSRQ_1_SI_X15(4*8); \
	VPINSRQ_1_SI_X11(10*8); \
	VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
	VMOVQ_SI_X12(6*8); \
	VMOVQ_SI_X11(11*8); \
	VPINSRQ_1_SI_X12(14*8); \
	VPINSRQ_1_SI_X11_0; \
	VINSERTI128 $1, X11, Y12, Y12; \
	LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
	VMOVQ_SI_X11(1*8); \
	VMOVDQU 12*8(SI), X14; \
	VPINSRQ_1_SI_X11(10*8); \
	VINSERTI128 $1, X11, Y14, Y14; \
	VMOVQ_SI_X15(2*8); \
	VMOVDQU 4*8(SI), X11; \
	VPINSRQ_1_SI_X15(7*8); \
	VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
	LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
	VMOVQ_SI_X13(2*8); \
	VPSHUFD $0x4E, 5*8(SI), X11; \
	VPINSRQ_1_SI_X13(4*8); \
	VINSERTI128 $1, X11, Y13, Y13; \
	LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
	VMOVQ_SI_X15(11*8); \
	VMOVQ_SI_X11(12*8); \
	VPINSRQ_1_SI_X15(14*8); \
	VPINSRQ_1_SI_X11_0; \
	VINSERTI128 $1, X11, Y15, Y15
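
// The ten LOAD_MSG_AVX2_<...> macros above load one message block in the word
// order given by a row of the BLAKE2b sigma permutation table; the indices are
// spelled out in each macro name. Wherever index 0 occurs, the displacement-free
// byte encodings are substituted, and adjacent or swapped index pairs are
// fetched with a single VMOVDQU or VPSHUFD load where the row allows it.
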
// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
	MOVQ h+0(FP), AX
	MOVQ c+8(FP), BX
	MOVQ flag+16(FP), CX
	MOVQ blocks_base+24(FP), SI
	MOVQ blocks_len+32(FP), DI

	MOVQ SP, DX
	MOVQ SP, R9
	ADDQ $31, R9
	ANDQ $~31, R9
	MOVQ R9, SP

	MOVQ CX, 16(SP)
	XORQ CX, CX
	MOVQ CX, 24(SP)

	VMOVDQU ·AVX2_c40<>(SB), Y4
	VMOVDQU ·AVX2_c48<>(SB), Y5

	VMOVDQU 0(AX), Y8
	VMOVDQU 32(AX), Y9
	VMOVDQU ·AVX2_iv0<>(SB), Y6
	VMOVDQU ·AVX2_iv1<>(SB), Y7

	MOVQ 0(BX), R8
	MOVQ 8(BX), R9
	MOVQ R9, 8(SP)

loop:
	ADDQ $128, R8
	MOVQ R8, 0(SP)
	CMPQ R8, $128
	JGE  noinc
	INCQ R9
	MOVQ R9, 8(SP)

noinc:
	VMOVDQA Y8, Y0
	VMOVDQA Y9, Y1
	VMOVDQA Y6, Y2
	VPXOR   0(SP), Y7, Y3

	LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
	VMOVDQA Y12, 32(SP)
	VMOVDQA Y13, 64(SP)
	VMOVDQA Y14, 96(SP)
	VMOVDQA Y15, 128(SP)
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
	VMOVDQA Y12, 160(SP)
	VMOVDQA Y13, 192(SP)
	VMOVDQA Y14, 224(SP)
	VMOVDQA Y15, 256(SP)
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)

	ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
	ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)

	VPXOR Y0, Y8, Y8
	VPXOR Y1, Y9, Y9
	VPXOR Y2, Y8, Y8
	VPXOR Y3, Y9, Y9

	LEAQ 128(SI), SI
	SUBQ $128, DI
	JNE  loop

	MOVQ R8, 0(BX)
	MOVQ R9, 8(BX)

	VMOVDQU Y8, 0(AX)
	VMOVDQU Y9, 32(AX)
	VZEROUPPER

	MOVQ DX, SP
	RET
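
// The remainder of the file implements the same compression function using
// only 128-bit AVX instructions, for CPUs that support AVX but not AVX2. The
// state lives in eight XMM registers (two 64-bit words each), so each round
// is a column half-round and a diagonal half-round joined by the SHUFFLE
// macros defined below.
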
#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE

#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
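
// SHUFFLE_AVX rotates the second, third, and fourth rows of the state so the
// diagonal step can reuse HALF_ROUND_AVX unchanged; SHUFFLE_AVX_INV undoes
// the rotation. The quadword unpack macros above are hand-encoded for the
// same assembler-support reason as the AVX2 byte macros.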
#define SHUFFLE_AVX() \
	VMOVDQA X6, X13; \
	VMOVDQA X2, X14; \
	VMOVDQA X4, X6; \
	VPUNPCKLQDQ_X13_X13_X15; \
	VMOVDQA X5, X4; \
	VMOVDQA X6, X5; \
	VPUNPCKHQDQ_X15_X7_X6; \
	VPUNPCKLQDQ_X7_X7_X15; \
	VPUNPCKHQDQ_X15_X13_X7; \
	VPUNPCKLQDQ_X3_X3_X15; \
	VPUNPCKHQDQ_X15_X2_X2; \
	VPUNPCKLQDQ_X14_X14_X15; \
	VPUNPCKHQDQ_X15_X3_X3

#define SHUFFLE_AVX_INV() \
	VMOVDQA X2, X13; \
	VMOVDQA X4, X14; \
	VPUNPCKLQDQ_X2_X2_X15; \
	VMOVDQA X5, X4; \
	VPUNPCKHQDQ_X15_X3_X2; \
	VMOVDQA X14, X5; \
	VPUNPCKLQDQ_X3_X3_X15; \
	VMOVDQA X6, X14; \
	VPUNPCKHQDQ_X15_X13_X3; \
	VPUNPCKLQDQ_X7_X7_X15; \
	VPUNPCKHQDQ_X15_X6_X6; \
	VPUNPCKLQDQ_X14_X14_X15; \
	VPUNPCKHQDQ_X15_X7_X7

#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
	VPADDQ m0, v0, v0; \
	VPADDQ v2, v0, v0; \
	VPADDQ m1, v1, v1; \
	VPADDQ v3, v1, v1; \
	VPXOR v0, v6, v6; \
	VPXOR v1, v7, v7; \
	VPSHUFD $-79, v6, v6; \
	VPSHUFD $-79, v7, v7; \
	VPADDQ v6, v4, v4; \
	VPADDQ v7, v5, v5; \
	VPXOR v4, v2, v2; \
	VPXOR v5, v3, v3; \
	VPSHUFB c40, v2, v2; \
	VPSHUFB c40, v3, v3; \
	VPADDQ m2, v0, v0; \
	VPADDQ v2, v0, v0; \
	VPADDQ m3, v1, v1; \
	VPADDQ v3, v1, v1; \
	VPXOR v0, v6, v6; \
	VPXOR v1, v7, v7; \
	VPSHUFB c48, v6, v6; \
	VPSHUFB c48, v7, v7; \
	VPADDQ v6, v4, v4; \
	VPADDQ v7, v5, v5; \
	VPXOR v4, v2, v2; \
	VPXOR v5, v3, v3; \
	VPADDQ v2, v2, t0; \
	VPSRLQ $63, v2, v2; \
	VPXOR t0, v2, v2; \
	VPADDQ v3, v3, t0; \
	VPSRLQ $63, v3, v3; \
	VPXOR t0, v3, v3
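
// HALF_ROUND_AVX applies the four parallel G functions of one column step
// (or, bracketed by SHUFFLE_AVX/SHUFFLE_AVX_INV, one diagonal step) to the
// state in v0-v7, two 64-bit words per register, using the same rotation
// tricks as the AVX2 path.
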
// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
	VMOVQ_SI_X12(i0*8); \
	VMOVQ_SI_X13(i2*8); \
	VMOVQ_SI_X14(i4*8); \
	VMOVQ_SI_X15(i6*8); \
	VPINSRQ_1_SI_X12(i1*8); \
	VPINSRQ_1_SI_X13(i3*8); \
	VPINSRQ_1_SI_X14(i5*8); \
	VPINSRQ_1_SI_X15(i7*8)

// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
	VMOVQ_SI_X12_0; \
	VMOVQ_SI_X13(4*8); \
	VMOVQ_SI_X14(1*8); \
	VMOVQ_SI_X15(5*8); \
	VPINSRQ_1_SI_X12(2*8); \
	VPINSRQ_1_SI_X13(6*8); \
	VPINSRQ_1_SI_X14(3*8); \
	VPINSRQ_1_SI_X15(7*8)

// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
	VPSHUFD $0x4E, 0*8(SI), X12; \
	VMOVQ_SI_X13(11*8); \
	VMOVQ_SI_X14(12*8); \
	VMOVQ_SI_X15(7*8); \
	VPINSRQ_1_SI_X13(5*8); \
	VPINSRQ_1_SI_X14(2*8); \
	VPINSRQ_1_SI_X15(3*8)

// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
	VMOVDQU 11*8(SI), X12; \
	VMOVQ_SI_X13(5*8); \
	VMOVQ_SI_X14(8*8); \
	VMOVQ_SI_X15(2*8); \
	VPINSRQ_1_SI_X13(15*8); \
	VPINSRQ_1_SI_X14_0; \
	VPINSRQ_1_SI_X15(13*8)

// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
	VMOVQ_SI_X12(2*8); \
	VMOVQ_SI_X13(4*8); \
	VMOVQ_SI_X14(6*8); \
	VMOVQ_SI_X15_0; \
	VPINSRQ_1_SI_X12(5*8); \
	VPINSRQ_1_SI_X13(15*8); \
	VPINSRQ_1_SI_X14(10*8); \
	VPINSRQ_1_SI_X15(8*8)

// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
	VMOVQ_SI_X12(9*8); \
	VMOVQ_SI_X13(2*8); \
	VMOVQ_SI_X14_0; \
	VMOVQ_SI_X15(4*8); \
	VPINSRQ_1_SI_X12(5*8); \
	VPINSRQ_1_SI_X13(10*8); \
	VPINSRQ_1_SI_X14(7*8); \
	VPINSRQ_1_SI_X15(15*8)

// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
	VMOVQ_SI_X12(2*8); \
	VMOVQ_SI_X13_0; \
	VMOVQ_SI_X14(12*8); \
	VMOVQ_SI_X15(11*8); \
	VPINSRQ_1_SI_X12(6*8); \
	VPINSRQ_1_SI_X13(8*8); \
	VPINSRQ_1_SI_X14(10*8); \
	VPINSRQ_1_SI_X15(3*8)

// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
	MOVQ 0*8(SI), X12; \
	VPSHUFD $0x4E, 8*8(SI), X13; \
	MOVQ 7*8(SI), X14; \
	MOVQ 2*8(SI), X15; \
	VPINSRQ_1_SI_X12(6*8); \
	VPINSRQ_1_SI_X14(3*8); \
	VPINSRQ_1_SI_X15(11*8)

// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
	MOVQ 6*8(SI), X12; \
	MOVQ 11*8(SI), X13; \
	MOVQ 15*8(SI), X14; \
	MOVQ 3*8(SI), X15; \
	VPINSRQ_1_SI_X12(14*8); \
	VPINSRQ_1_SI_X13_0; \
	VPINSRQ_1_SI_X14(9*8); \
	VPINSRQ_1_SI_X15(8*8)

// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
	MOVQ 5*8(SI), X12; \
	MOVQ 8*8(SI), X13; \
	MOVQ 0*8(SI), X14; \
	MOVQ 6*8(SI), X15; \
	VPINSRQ_1_SI_X12(15*8); \
	VPINSRQ_1_SI_X13(2*8); \
	VPINSRQ_1_SI_X14(4*8); \
	VPINSRQ_1_SI_X15(10*8)

// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
	VMOVDQU 12*8(SI), X12; \
	MOVQ 1*8(SI), X13; \
	MOVQ 2*8(SI), X14; \
	VPINSRQ_1_SI_X13(10*8); \
	VPINSRQ_1_SI_X14(7*8); \
	VMOVDQU 4*8(SI), X15

// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
	MOVQ 15*8(SI), X12; \
	MOVQ 3*8(SI), X13; \
	MOVQ 11*8(SI), X14; \
	MOVQ 12*8(SI), X15; \
	VPINSRQ_1_SI_X12(9*8); \
	VPINSRQ_1_SI_X13(13*8); \
	VPINSRQ_1_SI_X14(14*8); \
	VPINSRQ_1_SI_X15_0
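
// As in the AVX2 path, the fixed-permutation loaders above special-case
// message index 0 and use plain MOVQ, VMOVDQU, or VPSHUFD loads where a
// sigma row allows it; the generic LOAD_MSG_AVX covers the rows without a
// zero index.
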
// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
	MOVQ h+0(FP), AX
	MOVQ c+8(FP), BX
	MOVQ flag+16(FP), CX
	MOVQ blocks_base+24(FP), SI
	MOVQ blocks_len+32(FP), DI

	MOVQ SP, BP
	MOVQ SP, R9
	ADDQ $15, R9
	ANDQ $~15, R9
	MOVQ R9, SP

	VMOVDQU ·AVX_c40<>(SB), X0
	VMOVDQU ·AVX_c48<>(SB), X1
	VMOVDQA X0, X8
	VMOVDQA X1, X9

	VMOVDQU ·AVX_iv3<>(SB), X0
	VMOVDQA X0, 0(SP)
	XORQ    CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)

	VMOVDQU 0(AX), X10
	VMOVDQU 16(AX), X11
	VMOVDQU 32(AX), X2
	VMOVDQU 48(AX), X3

	MOVQ 0(BX), R8
	MOVQ 8(BX), R9

loop:
	ADDQ $128, R8
	CMPQ R8, $128
	JGE  noinc
	INCQ R9

noinc:
	VMOVQ_R8_X15
	VPINSRQ_1_R9_X15

	VMOVDQA X10, X0
	VMOVDQA X11, X1
	VMOVDQU ·AVX_iv0<>(SB), X4
	VMOVDQU ·AVX_iv1<>(SB), X5
	VMOVDQU ·AVX_iv2<>(SB), X6

	VPXOR   X15, X6, X6
	VMOVDQA 0(SP), X7

	LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
	VMOVDQA X12, 16(SP)
	VMOVDQA X13, 32(SP)
	VMOVDQA X14, 48(SP)
	VMOVDQA X15, 64(SP)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
	VMOVDQA X12, 80(SP)
	VMOVDQA X13, 96(SP)
	VMOVDQA X14, 112(SP)
	VMOVDQA X15, 128(SP)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
	VMOVDQA X12, 144(SP)
	VMOVDQA X13, 160(SP)
	VMOVDQA X14, 176(SP)
	VMOVDQA X15, 192(SP)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
	VMOVDQA X12, 208(SP)
	VMOVDQA X13, 224(SP)
	VMOVDQA X14, 240(SP)
	VMOVDQA X15, 256(SP)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()
	LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX()
	LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	SHUFFLE_AVX_INV()

	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
	SHUFFLE_AVX()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
	SHUFFLE_AVX_INV()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
	SHUFFLE_AVX()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)
	SHUFFLE_AVX_INV()

	VMOVDQU 32(AX), X14
	VMOVDQU 48(AX), X15
	VPXOR   X0, X10, X10
	VPXOR   X1, X11, X11
	VPXOR   X2, X14, X14
	VPXOR   X3, X15, X15
	VPXOR   X4, X10, X10
	VPXOR   X5, X11, X11
	VPXOR   X6, X14, X2
	VPXOR   X7, X15, X3
	VMOVDQU X2, 32(AX)
	VMOVDQU X3, 48(AX)

	LEAQ 128(SI), SI
	SUBQ $128, DI
	JNE  loop

	VMOVDQU X10, 0(AX)
	VMOVDQU X11, 16(AX)

	MOVQ R8, 0(BX)
	MOVQ R9, 8(BX)
	VZEROUPPER

	MOVQ BP, SP
	RET