;
; jdsample.asm - upsampling (64-bit AVX2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, D. R. Commander.
; Copyright (C) 2015, Intel Corporation.
; Copyright (C) 2018, Matthias Räncker.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208

%include "jsimdext.inc"

; --------------------------------------------------------------------------
    SECTION     SEG_CONST

    alignz      32
    GLOBAL_DATA(jconst_fancy_upsample_avx2)

EXTN(jconst_fancy_upsample_avx2):

PW_ONE   times 16 dw 1
PW_TWO   times 16 dw 2
PW_THREE times 16 dw 3
PW_SEVEN times 16 dw 7
PW_EIGHT times 16 dw 8

    alignz      32

; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        64
;
; Fancy processing for the common case of 2:1 horizontal and 1:1 vertical.
;
; The upsampling algorithm is linear interpolation between pixel centers,
; also known as a "triangle filter".  This is a good compromise between
; speed and visual quality.  The centers of the output pixels are 1/4 and 3/4
; of the way between input pixel centers.
;
; GLOBAL(void)
; jsimd_h2v1_fancy_upsample_avx2(int max_v_samp_factor,
;                                JDIMENSION downsampled_width,
;                                JSAMPARRAY input_data,
;                                JSAMPARRAY *output_data_ptr);
;
; r10 = int max_v_samp_factor
; r11d = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr
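;
; For reference, writing s[c] for the input sample in column c, the code
; below computes each pair of output samples as
;
;   out[2*c]   = (3 * s[c] + s[c-1] + 1) >> 2
;   out[2*c+1] = (3 * s[c] + s[c+1] + 2) >> 2
;
; (the PW_THREE, PW_ONE, and PW_TWO constants and the final word shift
; right by 2).  At the row ends, the missing neighbor is replaced by the
; edge sample itself, via the ymm9/ymm10 byte masks set up below.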
    align       32
    GLOBAL_FUNCTION(jsimd_h2v1_fancy_upsample_avx2)

EXTN(jsimd_h2v1_fancy_upsample_avx2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    push_xmm    3
    collect_args 4

    mov         eax, r11d               ; colctr
    test        rax, rax
    jz          near .return

    mov         rcx, r10                ; rowctr
    test        rcx, rcx
    jz          near .return

    mov         rsi, r12                ; input_data
    mov         rdi, r13
    mov         rdip, JSAMPARRAY [rdi]  ; output_data

    vpxor       ymm0, ymm0, ymm0                 ; ymm0=(all 0's)
    vpcmpeqb    xmm9, xmm9, xmm9
    vpsrldq     xmm10, xmm9, (SIZEOF_XMMWORD-1)  ; (ff -- -- -- ... -- --) LSB is ff
    vpslldq     xmm9, xmm9, (SIZEOF_XMMWORD-1)
    vperm2i128  ymm9, ymm9, ymm9, 1              ; (---- ---- ... ---- ---- ff) MSB is ff

.rowloop:
    push        rax                     ; colctr
    push        rdi
    push        rsi

    mov         rsip, JSAMPROW [rsi]    ; inptr
    mov         rdip, JSAMPROW [rdi]    ; outptr

    test        rax, SIZEOF_YMMWORD-1
    jz          short .skip
    mov         dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl  ; insert a dummy sample
.skip:
    vpand       ymm7, ymm10, YMMWORD [rsi+0*SIZEOF_YMMWORD]

    add         rax, byte SIZEOF_YMMWORD-1
    and         rax, byte -SIZEOF_YMMWORD
    cmp         rax, byte SIZEOF_YMMWORD
    ja          short .columnloop

.columnloop_last:
    vpand       ymm6, ymm9, YMMWORD [rsi+0*SIZEOF_YMMWORD]
    jmp         short .upsample

.columnloop:
    vmovdqu     ymm6, YMMWORD [rsi+1*SIZEOF_YMMWORD]
    vperm2i128  ymm6, ymm0, ymm6, 0x20
    vpslldq     ymm6, ymm6, 15

.upsample:
    vmovdqu     ymm1, YMMWORD [rsi+0*SIZEOF_YMMWORD]  ; ymm1=( 0 1 2 ... 29 30 31)

    vperm2i128  ymm2, ymm0, ymm1, 0x20
    vpalignr    ymm2, ymm1, ymm2, 15    ; ymm2=(-- 0 1 ... 28 29 30)
    vperm2i128  ymm4, ymm0, ymm1, 0x03
    vpalignr    ymm3, ymm4, ymm1, 1     ; ymm3=( 1 2 3 ... 30 31 --)

    vpor        ymm2, ymm2, ymm7        ; ymm2=(-1 0 1 ... 28 29 30)
    vpor        ymm3, ymm3, ymm6        ; ymm3=( 1 2 3 ... 30 31 32)

    vpsrldq     ymm7, ymm4, (SIZEOF_XMMWORD-1)  ; ymm7=(31 -- -- ... -- -- --)

    vpunpckhbw  ymm4, ymm1, ymm0        ; ymm4=( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
    vpunpcklbw  ymm5, ymm1, ymm0        ; ymm5=( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
    vperm2i128  ymm1, ymm5, ymm4, 0x20  ; ymm1=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vperm2i128  ymm4, ymm5, ymm4, 0x31  ; ymm4=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vpunpckhbw  ymm5, ymm2, ymm0        ; ymm5=( 7 8 9 10 11 12 13 14 23 24 25 26 27 28 29 30)
    vpunpcklbw  ymm6, ymm2, ymm0        ; ymm6=(-1 0 1 2 3 4 5 6 15 16 17 18 19 20 21 22)
    vperm2i128  ymm2, ymm6, ymm5, 0x20  ; ymm2=(-1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)
    vperm2i128  ymm5, ymm6, ymm5, 0x31  ; ymm5=(15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)
    vpunpckhbw  ymm6, ymm3, ymm0        ; ymm6=( 9 10 11 12 13 14 15 16 25 26 27 28 29 30 31 32)
    vpunpcklbw  ymm8, ymm3, ymm0        ; ymm8=( 1 2 3 4 5 6 7 8 17 18 19 20 21 22 23 24)
    vperm2i128  ymm3, ymm8, ymm6, 0x20  ; ymm3=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16)
    vperm2i128  ymm6, ymm8, ymm6, 0x31  ; ymm6=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32)

    vpmullw     ymm1, ymm1, [rel PW_THREE]
    vpmullw     ymm4, ymm4, [rel PW_THREE]
    vpaddw      ymm2, ymm2, [rel PW_ONE]
    vpaddw      ymm5, ymm5, [rel PW_ONE]
    vpaddw      ymm3, ymm3, [rel PW_TWO]
    vpaddw      ymm6, ymm6, [rel PW_TWO]

    vpaddw      ymm2, ymm2, ymm1
    vpaddw      ymm5, ymm5, ymm4
    vpsrlw      ymm2, ymm2, 2           ; ymm2=OutLE=( 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30)
    vpsrlw      ymm5, ymm5, 2           ; ymm5=OutHE=(32 34 36 38 40 42 44 46 48 50 52 54 56 58 60 62)
    vpaddw      ymm3, ymm3, ymm1
    vpaddw      ymm6, ymm6, ymm4
    vpsrlw      ymm3, ymm3, 2           ; ymm3=OutLO=( 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31)
    vpsrlw      ymm6, ymm6, 2           ; ymm6=OutHO=(33 35 37 39 41 43 45 47 49 51 53 55 57 59 61 63)

    vpsllw      ymm3, ymm3, BYTE_BIT
    vpsllw      ymm6, ymm6, BYTE_BIT
    vpor        ymm2, ymm2, ymm3        ; ymm2=OutL=( 0 1 2 ... 29 30 31)
    vpor        ymm5, ymm5, ymm6        ; ymm5=OutH=(32 33 34 ... 61 62 63)

    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm2
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm5

    sub         rax, byte SIZEOF_YMMWORD
    add         rsi, byte 1*SIZEOF_YMMWORD  ; inptr
    add         rdi, byte 2*SIZEOF_YMMWORD  ; outptr
    cmp         rax, byte SIZEOF_YMMWORD
    ja          near .columnloop
    test        eax, eax
    jnz         near .columnloop_last

    pop         rsi
    pop         rdi
    pop         rax

    add         rsi, byte SIZEOF_JSAMPROW   ; input_data
    add         rdi, byte SIZEOF_JSAMPROW   ; output_data
    dec         rcx                         ; rowctr
    jg          near .rowloop

.return:
    vzeroupper
    uncollect_args 4
    pop_xmm     3
    pop         rbp
    ret

; --------------------------------------------------------------------------
;
; Fancy processing for the common case of 2:1 horizontal and 2:1 vertical.
; Again a triangle filter; see comments for h2v1 case, above.
;
; GLOBAL(void)
; jsimd_h2v2_fancy_upsample_avx2(int max_v_samp_factor,
;                                JDIMENSION downsampled_width,
;                                JSAMPARRAY input_data,
;                                JSAMPARRAY *output_data_ptr);
;
; r10 = int max_v_samp_factor
; r11d = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr
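;
; For reference, each output row is built in two steps.  Vertically, the
; nearer input row (weight 3) is blended with the farther one into 16-bit
; intermediates, which are parked in the output rows and reloaded below:
;
;   Int[c] = 3 * s[near][c] + s[far][c]
;
; Horizontally, each intermediate then yields two output samples:
;
;   out[2*c]   = (3 * Int[c] + Int[c-1] + 8) >> 4
;   out[2*c+1] = (3 * Int[c] + Int[c+1] + 7) >> 4
;
; hence the PW_THREE, PW_EIGHT, and PW_SEVEN constants and the final word
; shift right by 4.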
%define wk(i)   rbp - (WK_NUM - (i)) * SIZEOF_YMMWORD  ; ymmword wk[WK_NUM]
%define WK_NUM  4

    align       32
    GLOBAL_FUNCTION(jsimd_h2v2_fancy_upsample_avx2)

EXTN(jsimd_h2v2_fancy_upsample_avx2):
    push        rbp
    mov         rax, rsp                     ; rax = original rbp
    sub         rsp, byte 4
    and         rsp, byte (-SIZEOF_YMMWORD)  ; align to 256 bits
    mov         [rsp], rax
    mov         rbp, rsp                     ; rbp = aligned rbp
    lea         rsp, [wk(0)]
    push_xmm    3
    collect_args 4
    push        rbx

    mov         eax, r11d               ; colctr
    test        rax, rax
    jz          near .return

    mov         rcx, r10                ; rowctr
    test        rcx, rcx
    jz          near .return

    mov         rsi, r12                ; input_data
    mov         rdi, r13
    mov         rdip, JSAMPARRAY [rdi]  ; output_data

.rowloop:
    push        rax                     ; colctr
    push        rcx
    push        rdi
    push        rsi

    mov         rcxp, JSAMPROW [rsi-1*SIZEOF_JSAMPROW]  ; inptr1(above)
    mov         rbxp, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]  ; inptr0
    mov         rsip, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]  ; inptr1(below)
    mov         rdxp, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]  ; outptr0
    mov         rdip, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]  ; outptr1

    vpxor       ymm8, ymm8, ymm8                 ; ymm8=(all 0's)
    vpcmpeqb    xmm9, xmm9, xmm9
    vpsrldq     xmm10, xmm9, (SIZEOF_XMMWORD-2)  ; (ffff ---- ---- ... ---- ----) LSB is ffff
    vpslldq     xmm9, xmm9, (SIZEOF_XMMWORD-2)
    vperm2i128  ymm9, ymm9, ymm9, 1              ; (---- ---- ... ---- ---- ffff) MSB is ffff

    test        rax, SIZEOF_YMMWORD-1
    jz          short .skip
    push        rdx
    mov         dl, JSAMPLE [rcx+(rax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [rcx+rax*SIZEOF_JSAMPLE], dl
    mov         dl, JSAMPLE [rbx+(rax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [rbx+rax*SIZEOF_JSAMPLE], dl
    mov         dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
    mov         JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl  ; insert a dummy sample
    pop         rdx
.skip:
    ; -- process the first column block

    vmovdqu     ymm0, YMMWORD [rbx+0*SIZEOF_YMMWORD]  ; ymm0=row[ 0][0]
    vmovdqu     ymm1, YMMWORD [rcx+0*SIZEOF_YMMWORD]  ; ymm1=row[-1][0]
    vmovdqu     ymm2, YMMWORD [rsi+0*SIZEOF_YMMWORD]  ; ymm2=row[+1][0]

    vpunpckhbw  ymm4, ymm0, ymm8        ; ymm4=row[ 0]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
    vpunpcklbw  ymm5, ymm0, ymm8        ; ymm5=row[ 0]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
    vperm2i128  ymm0, ymm5, ymm4, 0x20  ; ymm0=row[ 0]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vperm2i128  ymm4, ymm5, ymm4, 0x31  ; ymm4=row[ 0](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vpunpckhbw  ymm5, ymm1, ymm8        ; ymm5=row[-1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
    vpunpcklbw  ymm6, ymm1, ymm8        ; ymm6=row[-1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
    vperm2i128  ymm1, ymm6, ymm5, 0x20  ; ymm1=row[-1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vperm2i128  ymm5, ymm6, ymm5, 0x31  ; ymm5=row[-1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vpunpckhbw  ymm6, ymm2, ymm8        ; ymm6=row[+1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
    vpunpcklbw  ymm3, ymm2, ymm8        ; ymm3=row[+1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
    vperm2i128  ymm2, ymm3, ymm6, 0x20  ; ymm2=row[+1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vperm2i128  ymm6, ymm3, ymm6, 0x31  ; ymm6=row[+1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vpmullw     ymm0, ymm0, [rel PW_THREE]
    vpmullw     ymm4, ymm4, [rel PW_THREE]

    vpaddw      ymm1, ymm1, ymm0        ; ymm1=Int0L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vpaddw      ymm5, ymm5, ymm4        ; ymm5=Int0H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)
    vpaddw      ymm2, ymm2, ymm0        ; ymm2=Int1L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vpaddw      ymm6, ymm6, ymm4        ; ymm6=Int1H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vmovdqu     YMMWORD [rdx+0*SIZEOF_YMMWORD], ymm1  ; temporarily save
    vmovdqu     YMMWORD [rdx+1*SIZEOF_YMMWORD], ymm5  ; the intermediate data
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm2
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm6

    vpand       ymm1, ymm1, ymm10       ; ymm1=( 0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
    vpand       ymm2, ymm2, ymm10       ; ymm2=( 0 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)

    vmovdqa     YMMWORD [wk(0)], ymm1
    vmovdqa     YMMWORD [wk(1)], ymm2

    add         rax, byte SIZEOF_YMMWORD-1
    and         rax, byte -SIZEOF_YMMWORD
    cmp         rax, byte SIZEOF_YMMWORD
    ja          short .columnloop

.columnloop_last:
    ; -- process the last column block

    vpand       ymm1, ymm9, YMMWORD [rdx+1*SIZEOF_YMMWORD]
    vpand       ymm2, ymm9, YMMWORD [rdi+1*SIZEOF_YMMWORD]

    vmovdqa     YMMWORD [wk(2)], ymm1   ; ymm1=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 31)
    vmovdqa     YMMWORD [wk(3)], ymm2   ; ymm2=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 31)

    jmp         near .upsample

.columnloop:
    ; -- process the next column block

    vmovdqu     ymm0, YMMWORD [rbx+1*SIZEOF_YMMWORD]  ; ymm0=row[ 0][1]
    vmovdqu     ymm1, YMMWORD [rcx+1*SIZEOF_YMMWORD]  ; ymm1=row[-1][1]
    vmovdqu     ymm2, YMMWORD [rsi+1*SIZEOF_YMMWORD]  ; ymm2=row[+1][1]

    vpunpckhbw  ymm4, ymm0, ymm8        ; ymm4=row[ 0]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
    vpunpcklbw  ymm5, ymm0, ymm8        ; ymm5=row[ 0]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
    vperm2i128  ymm0, ymm5, ymm4, 0x20  ; ymm0=row[ 0]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vperm2i128  ymm4, ymm5, ymm4, 0x31  ; ymm4=row[ 0](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vpunpckhbw  ymm5, ymm1, ymm8        ; ymm5=row[-1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
    vpunpcklbw  ymm6, ymm1, ymm8        ; ymm6=row[-1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
    vperm2i128  ymm1, ymm6, ymm5, 0x20  ; ymm1=row[-1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vperm2i128  ymm5, ymm6, ymm5, 0x31  ; ymm5=row[-1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vpunpckhbw  ymm6, ymm2, ymm8        ; ymm6=row[+1]( 8 9 10 11 12 13 14 15 24 25 26 27 28 29 30 31)
    vpunpcklbw  ymm7, ymm2, ymm8        ; ymm7=row[+1]( 0 1 2 3 4 5 6 7 16 17 18 19 20 21 22 23)
    vperm2i128  ymm2, ymm7, ymm6, 0x20  ; ymm2=row[+1]( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vperm2i128  ymm6, ymm7, ymm6, 0x31  ; ymm6=row[+1](16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vpmullw     ymm0, ymm0, [rel PW_THREE]
    vpmullw     ymm4, ymm4, [rel PW_THREE]

    vpaddw      ymm1, ymm1, ymm0        ; ymm1=Int0L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vpaddw      ymm5, ymm5, ymm4        ; ymm5=Int0H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)
    vpaddw      ymm2, ymm2, ymm0        ; ymm2=Int1L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vpaddw      ymm6, ymm6, ymm4        ; ymm6=Int1H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vmovdqu     YMMWORD [rdx+2*SIZEOF_YMMWORD], ymm1  ; temporarily save
    vmovdqu     YMMWORD [rdx+3*SIZEOF_YMMWORD], ymm5  ; the intermediate data
    vmovdqu     YMMWORD [rdi+2*SIZEOF_YMMWORD], ymm2
    vmovdqu     YMMWORD [rdi+3*SIZEOF_YMMWORD], ymm6

    vperm2i128  ymm1, ymm8, ymm1, 0x20
    vpslldq     ymm1, ymm1, 14          ; ymm1=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 0)
    vperm2i128  ymm2, ymm8, ymm2, 0x20
    vpslldq     ymm2, ymm2, 14          ; ymm2=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 0)

    vmovdqa     YMMWORD [wk(2)], ymm1
    vmovdqa     YMMWORD [wk(3)], ymm2

.upsample:
    ; -- process the upper row

    vmovdqu     ymm7, YMMWORD [rdx+0*SIZEOF_YMMWORD]  ; ymm7=Int0L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vmovdqu     ymm3, YMMWORD [rdx+1*SIZEOF_YMMWORD]  ; ymm3=Int0H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vperm2i128  ymm0, ymm8, ymm7, 0x03
    vpalignr    ymm0, ymm0, ymm7, 2     ; ymm0=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 --)
    vperm2i128  ymm4, ymm8, ymm3, 0x20
    vpslldq     ymm4, ymm4, 14          ; ymm4=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 16)

    vperm2i128  ymm5, ymm8, ymm7, 0x03
    vpsrldq     ymm5, ymm5, 14          ; ymm5=(15 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
    vperm2i128  ymm6, ymm8, ymm3, 0x20
    vpalignr    ymm6, ymm3, ymm6, 14    ; ymm6=(-- 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

    vpor        ymm0, ymm0, ymm4        ; ymm0=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16)
    vpor        ymm5, ymm5, ymm6        ; ymm5=(15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

    vperm2i128  ymm2, ymm8, ymm3, 0x03
    vpalignr    ymm2, ymm2, ymm3, 2     ; ymm2=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 --)
    vperm2i128  ymm4, ymm8, ymm3, 0x03
    vpsrldq     ymm4, ymm4, 14          ; ymm4=(31 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
    vperm2i128  ymm1, ymm8, ymm7, 0x20
    vpalignr    ymm1, ymm7, ymm1, 14    ; ymm1=(-- 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)

    vpor        ymm1, ymm1, YMMWORD [wk(0)]  ; ymm1=(-1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)
    vpor        ymm2, ymm2, YMMWORD [wk(2)]  ; ymm2=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32)

    vmovdqa     YMMWORD [wk(0)], ymm4

    vpmullw     ymm7, ymm7, [rel PW_THREE]
    vpmullw     ymm3, ymm3, [rel PW_THREE]
    vpaddw      ymm1, ymm1, [rel PW_EIGHT]
    vpaddw      ymm5, ymm5, [rel PW_EIGHT]
    vpaddw      ymm0, ymm0, [rel PW_SEVEN]
    vpaddw      ymm2, ymm2, [rel PW_SEVEN]
    vpaddw      ymm1, ymm1, ymm7
    vpaddw      ymm5, ymm5, ymm3
    vpsrlw      ymm1, ymm1, 4           ; ymm1=Out0LE=( 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30)
    vpsrlw      ymm5, ymm5, 4           ; ymm5=Out0HE=(32 34 36 38 40 42 44 46 48 50 52 54 56 58 60 62)
    vpaddw      ymm0, ymm0, ymm7
    vpaddw      ymm2, ymm2, ymm3
    vpsrlw      ymm0, ymm0, 4           ; ymm0=Out0LO=( 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31)
    vpsrlw      ymm2, ymm2, 4           ; ymm2=Out0HO=(33 35 37 39 41 43 45 47 49 51 53 55 57 59 61 63)

    vpsllw      ymm0, ymm0, BYTE_BIT
    vpsllw      ymm2, ymm2, BYTE_BIT
    vpor        ymm1, ymm1, ymm0        ; ymm1=Out0L=( 0 1 2 ... 29 30 31)
    vpor        ymm5, ymm5, ymm2        ; ymm5=Out0H=(32 33 34 ... 61 62 63)

    vmovdqu     YMMWORD [rdx+0*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [rdx+1*SIZEOF_YMMWORD], ymm5

    ; -- process the lower row

    vmovdqu     ymm6, YMMWORD [rdi+0*SIZEOF_YMMWORD]  ; ymm6=Int1L=( 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
    vmovdqu     ymm4, YMMWORD [rdi+1*SIZEOF_YMMWORD]  ; ymm4=Int1H=(16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31)

    vperm2i128  ymm7, ymm8, ymm6, 0x03
    vpalignr    ymm7, ymm7, ymm6, 2     ; ymm7=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 --)
    vperm2i128  ymm3, ymm8, ymm4, 0x20
    vpslldq     ymm3, ymm3, 14          ; ymm3=(-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- 16)

    vperm2i128  ymm0, ymm8, ymm6, 0x03
    vpsrldq     ymm0, ymm0, 14          ; ymm0=(15 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
    vperm2i128  ymm2, ymm8, ymm4, 0x20
    vpalignr    ymm2, ymm4, ymm2, 14    ; ymm2=(-- 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

    vpor        ymm7, ymm7, ymm3        ; ymm7=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16)
    vpor        ymm0, ymm0, ymm2        ; ymm0=(15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30)

    vperm2i128  ymm5, ymm8, ymm4, 0x03
    vpalignr    ymm5, ymm5, ymm4, 2     ; ymm5=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 --)
    vperm2i128  ymm3, ymm8, ymm4, 0x03
    vpsrldq     ymm3, ymm3, 14          ; ymm3=(31 -- -- -- -- -- -- -- -- -- -- -- -- -- -- --)
    vperm2i128  ymm1, ymm8, ymm6, 0x20
    vpalignr    ymm1, ymm6, ymm1, 14    ; ymm1=(-- 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)

    vpor        ymm1, ymm1, YMMWORD [wk(1)]  ; ymm1=(-1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14)
    vpor        ymm5, ymm5, YMMWORD [wk(3)]  ; ymm5=(17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32)

    vmovdqa     YMMWORD [wk(1)], ymm3

    vpmullw     ymm6, ymm6, [rel PW_THREE]
    vpmullw     ymm4, ymm4, [rel PW_THREE]
    vpaddw      ymm1, ymm1, [rel PW_EIGHT]
    vpaddw      ymm0, ymm0, [rel PW_EIGHT]
    vpaddw      ymm7, ymm7, [rel PW_SEVEN]
    vpaddw      ymm5, ymm5, [rel PW_SEVEN]

    vpaddw      ymm1, ymm1, ymm6
    vpaddw      ymm0, ymm0, ymm4
    vpsrlw      ymm1, ymm1, 4           ; ymm1=Out1LE=( 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30)
    vpsrlw      ymm0, ymm0, 4           ; ymm0=Out1HE=(32 34 36 38 40 42 44 46 48 50 52 54 56 58 60 62)
    vpaddw      ymm7, ymm7, ymm6
    vpaddw      ymm5, ymm5, ymm4
    vpsrlw      ymm7, ymm7, 4           ; ymm7=Out1LO=( 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31)
    vpsrlw      ymm5, ymm5, 4           ; ymm5=Out1HO=(33 35 37 39 41 43 45 47 49 51 53 55 57 59 61 63)

    vpsllw      ymm7, ymm7, BYTE_BIT
    vpsllw      ymm5, ymm5, BYTE_BIT
    vpor        ymm1, ymm1, ymm7        ; ymm1=Out1L=( 0 1 2 ... 29 30 31)
    vpor        ymm0, ymm0, ymm5        ; ymm0=Out1H=(32 33 34 ... 61 62 63)

    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm0

    sub         rax, byte SIZEOF_YMMWORD
    add         rcx, byte 1*SIZEOF_YMMWORD  ; inptr1(above)
    add         rbx, byte 1*SIZEOF_YMMWORD  ; inptr0
    add         rsi, byte 1*SIZEOF_YMMWORD  ; inptr1(below)
    add         rdx, byte 2*SIZEOF_YMMWORD  ; outptr0
    add         rdi, byte 2*SIZEOF_YMMWORD  ; outptr1
    cmp         rax, byte SIZEOF_YMMWORD
    ja          near .columnloop
    test        rax, rax
    jnz         near .columnloop_last

    pop         rsi
    pop         rdi
    pop         rcx
    pop         rax

    add         rsi, byte 1*SIZEOF_JSAMPROW  ; input_data
    add         rdi, byte 2*SIZEOF_JSAMPROW  ; output_data
    sub         rcx, byte 2                  ; rowctr
    jg          near .rowloop

.return:
    pop         rbx
    vzeroupper
    uncollect_args 4
    pop_xmm     3
    mov         rsp, rbp                ; rsp <- aligned rbp
    pop         rsp                     ; rsp <- original rbp
    pop         rbp
    ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 1:1 vertical.
; It's still a box filter.
;
; GLOBAL(void)
; jsimd_h2v1_upsample_avx2(int max_v_samp_factor, JDIMENSION output_width,
;                          JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
;
; r10 = int max_v_samp_factor
; r11d = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr
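;
; For reference, the box filter simply replicates each input sample:
;
;   out[2*c] = out[2*c+1] = s[c]
;
; which is what the vpunpcklbw/vpunpckhbw pairs against the source itself
; implement (after vpermq reorders the 128-bit lanes for the full-width case).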
    align       32
    GLOBAL_FUNCTION(jsimd_h2v1_upsample_avx2)

EXTN(jsimd_h2v1_upsample_avx2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 4

    mov         edx, r11d
    add         rdx, byte (SIZEOF_YMMWORD-1)
    and         rdx, -SIZEOF_YMMWORD
    jz          near .return

    mov         rcx, r10                ; rowctr
    test        rcx, rcx
    jz          short .return

    mov         rsi, r12                ; input_data
    mov         rdi, r13
    mov         rdip, JSAMPARRAY [rdi]  ; output_data

.rowloop:
    push        rdi
    push        rsi

    mov         rsip, JSAMPROW [rsi]    ; inptr
    mov         rdip, JSAMPROW [rdi]    ; outptr
    mov         rax, rdx                ; colctr
.columnloop:
    cmp         rax, byte SIZEOF_YMMWORD
    ja          near .above_16

    vmovdqu     xmm0, XMMWORD [rsi+0*SIZEOF_YMMWORD]
    vpunpckhbw  xmm1, xmm0, xmm0
    vpunpcklbw  xmm0, xmm0, xmm0

    vmovdqu     XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
    vmovdqu     XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

    jmp         short .nextrow

.above_16:
    vmovdqu     ymm0, YMMWORD [rsi+0*SIZEOF_YMMWORD]

    vpermq      ymm0, ymm0, 0xd8
    vpunpckhbw  ymm1, ymm0, ymm0
    vpunpcklbw  ymm0, ymm0, ymm0

    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm0
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm1

    sub         rax, byte 2*SIZEOF_YMMWORD
    jz          short .nextrow

    add         rsi, byte SIZEOF_YMMWORD    ; inptr
    add         rdi, byte 2*SIZEOF_YMMWORD  ; outptr
    jmp         short .columnloop

.nextrow:
    pop         rsi
    pop         rdi

    add         rsi, byte SIZEOF_JSAMPROW   ; input_data
    add         rdi, byte SIZEOF_JSAMPROW   ; output_data
    dec         rcx                         ; rowctr
    jg          short .rowloop

.return:
    vzeroupper
    uncollect_args 4
    pop         rbp
    ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 2:1 vertical.
; It's still a box filter.
;
; GLOBAL(void)
; jsimd_h2v2_upsample_avx2(int max_v_samp_factor, JDIMENSION output_width,
;                          JSAMPARRAY input_data, JSAMPARRAY *output_data_ptr);
;
; r10 = int max_v_samp_factor
; r11d = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY *output_data_ptr
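;
; For reference, this is the same per-sample replication as the h2v1 case
; above, applied to two output rows at once:
;
;   out0[2*c] = out0[2*c+1] = out1[2*c] = out1[2*c+1] = s[c]
;
; so each horizontally doubled vector is stored to both outptr0 and outptr1.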
    align       32
    GLOBAL_FUNCTION(jsimd_h2v2_upsample_avx2)

EXTN(jsimd_h2v2_upsample_avx2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 4
    push        rbx

    mov         edx, r11d
    add         rdx, byte (SIZEOF_YMMWORD-1)
    and         rdx, -SIZEOF_YMMWORD
    jz          near .return

    mov         rcx, r10                ; rowctr
    test        rcx, rcx
    jz          near .return

    mov         rsi, r12                ; input_data
    mov         rdi, r13
    mov         rdip, JSAMPARRAY [rdi]  ; output_data

.rowloop:
    push        rdi
    push        rsi

    mov         rsip, JSAMPROW [rsi]                    ; inptr
    mov         rbxp, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]  ; outptr0
    mov         rdip, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]  ; outptr1
    mov         rax, rdx                                ; colctr
.columnloop:
    cmp         rax, byte SIZEOF_YMMWORD
    ja          short .above_16

    vmovdqu     xmm0, XMMWORD [rsi+0*SIZEOF_XMMWORD]
    vpunpckhbw  xmm1, xmm0, xmm0
    vpunpcklbw  xmm0, xmm0, xmm0

    vmovdqu     XMMWORD [rbx+0*SIZEOF_XMMWORD], xmm0
    vmovdqu     XMMWORD [rbx+1*SIZEOF_XMMWORD], xmm1
    vmovdqu     XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
    vmovdqu     XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

    jmp         near .nextrow

.above_16:
    vmovdqu     ymm0, YMMWORD [rsi+0*SIZEOF_YMMWORD]

    vpermq      ymm0, ymm0, 0xd8
    vpunpckhbw  ymm1, ymm0, ymm0
    vpunpcklbw  ymm0, ymm0, ymm0

    vmovdqu     YMMWORD [rbx+0*SIZEOF_YMMWORD], ymm0
    vmovdqu     YMMWORD [rbx+1*SIZEOF_YMMWORD], ymm1
    vmovdqu     YMMWORD [rdi+0*SIZEOF_YMMWORD], ymm0
    vmovdqu     YMMWORD [rdi+1*SIZEOF_YMMWORD], ymm1

    sub         rax, byte 2*SIZEOF_YMMWORD
    jz          short .nextrow

    add         rsi, byte SIZEOF_YMMWORD    ; inptr
    add         rbx, 2*SIZEOF_YMMWORD       ; outptr0
    add         rdi, 2*SIZEOF_YMMWORD       ; outptr1
    jmp         short .columnloop

.nextrow:
    pop         rsi
    pop         rdi

    add         rsi, byte 1*SIZEOF_JSAMPROW ; input_data
    add         rdi, byte 2*SIZEOF_JSAMPROW ; output_data
    sub         rcx, byte 2                 ; rowctr
    jg          near .rowloop

.return:
    pop         rbx
    vzeroupper
    uncollect_args 4
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32