;******************************************************************************
;* VP9 IDCT SIMD optimizations
;*
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pw_11585x2: times 8 dw 23170
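; Note: pmulhrsw computes (x*c + 0x4000) >> 15, so keeping the coefficients
; pre-doubled (11585*2 = 23170) turns it into the rounded 14-bit fixed-point
; multiply (x*11585 + 0x2000) >> 14 that the scalar idct uses.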
%macro VP9_IDCT_COEFFS 2-3 0
pw_m%1_%2:  times 4 dw -%1,  %2
pw_%2_%1:   times 4 dw  %2,  %1
%if %3 == 1
pw_m%2_m%1: times 4 dw -%2, -%1
%endif
%endmacro

%macro VP9_IDCT_COEFFS_ALL 2-3 0
pw_%1x2: times 8 dw %1*2
pw_%2x2: times 8 dw %2*2
VP9_IDCT_COEFFS %1, %2, %3
%endmacro

VP9_IDCT_COEFFS_ALL 15137,  6270, 1
VP9_IDCT_COEFFS_ALL 16069,  3196
VP9_IDCT_COEFFS_ALL  9102, 13623
VP9_IDCT_COEFFS_ALL 16305,  1606
VP9_IDCT_COEFFS_ALL 10394, 12665
VP9_IDCT_COEFFS_ALL 14449,  7723
VP9_IDCT_COEFFS_ALL  4756, 15679
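; All of the above are entries of the VP9 cosine table, round(cos(k*pi/64) * 2^14)
; (e.g. 15137 = cos(8*pi/64)*2^14, 6270 = cos(24*pi/64)*2^14), stored both
; pre-doubled for pmulhrsw and as interleaved +/- pairs for the pmaddwd
; butterflies below.
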
pd_8192: times 4 dd 8192
pw_2048: times 8 dw 2048
pw_1024: times 8 dw 1024
pw_512:  times 8 dw  512

SECTION .text

; (a*x + b*y + round) >> shift
%macro VP9_MULSUB_2W_2X 6 ; dst1, dst2, src (unchanged), round, coefs1, coefs2
    pmaddwd            m%1, m%3, %5
    pmaddwd            m%2, m%3, %6
    paddd              m%1, %4
    paddd              m%2, %4
    psrad              m%1, 14
    psrad              m%2, 14
%endmacro
%macro VP9_UNPACK_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1, tmp2
    punpckhwd          m%6, m%2, m%1
    VP9_MULSUB_2W_2X   %7, %6, %6, %5, [pw_m%3_%4], [pw_%4_%3]
    punpcklwd          m%2, m%1
    VP9_MULSUB_2W_2X   %1, %2, %2, %5, [pw_m%3_%4], [pw_%4_%3]
    packssdw           m%1, m%7
    packssdw           m%2, m%6
%endmacro
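; The interleave + pmaddwd sequence above is the rotation used throughout this
; idct; per 16-bit lane it computes:
;   dst1 = (in1*coef2 - in2*coef1 + rnd) >> 14
;   dst2 = (in1*coef1 + in2*coef2 + rnd) >> 14
; with %5 expected to hold pd_8192, i.e. 0.5 in the 14-bit fixed-point domain.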
%macro VP9_STORE_2X 5 ; reg1, reg2, tmp1, tmp2, zero
    movh               m%3, [dstq]
    movh               m%4, [dstq+strideq]
    punpcklbw          m%3, m%5
    punpcklbw          m%4, m%5
    paddw              m%3, m%1
    paddw              m%4, m%2
    packuswb           m%3, m%5
    packuswb           m%4, m%5
    movh               [dstq], m%3
    movh               [dstq+strideq], m%4
%endmacro
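; i.e. add the residual in reg1/reg2 to two rows of dst: load half a register
; of pixels per row, widen to words against the zero register (%5), add, then
; pack back with unsigned saturation so results are clipped to [0,255].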
;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_4x4_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------

%macro VP9_IDCT4_1D_FINALIZE 0
    SUMSUB_BA          w, 3, 2, 4                        ; m3=t3+t0, m2=-t3+t0
    SUMSUB_BA          w, 1, 0, 4                        ; m1=t2+t1, m0=-t2+t1
    SWAP               0, 3, 2                           ; 3102 -> 0123
%endmacro

%macro VP9_IDCT4_1D 0
    SUMSUB_BA          w, 2, 0, 4                        ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
    pmulhrsw           m2, m6                            ; m2=t0
    pmulhrsw           m0, m6                            ; m0=t1
    VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5  ; m1=t2, m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro
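; This matches the scalar 4-point idct:
;   t0 = (in0 + in2) * 11585 >> 14,  t1 = (in0 - in2) * 11585 >> 14
;   t2 = (in1*6270  - in3*15137) >> 14
;   t3 = (in1*15137 + in3*6270)  >> 14
;   out = { t0+t3, t1+t2, t1-t2, t0-t3 }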
; 2x2 top left corner
%macro VP9_IDCT4_2x2_1D 0
    pmulhrsw           m0, m5                            ; m0=t1
    mova               m2, m0                            ; m2=t0
    mova               m3, m1
    pmulhrsw           m1, m6                            ; m1=t2
    pmulhrsw           m3, m7                            ; m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro
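; With only in0/in1 nonzero, the rotations collapse to plain scalings:
; t0 = t1 = in0*11585 >> 14, t2 = in1*6270 >> 14, t3 = in1*15137 >> 14
; (m5/m6/m7 hold the doubled constants, loaded by the caller).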
%macro VP9_IDCT4_WRITEOUT 0
    mova               m5, [pw_2048]
    pmulhrsw           m0, m5                            ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    pmulhrsw           m1, m5
    VP9_STORE_2X       0, 1, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    pmulhrsw           m2, m5
    pmulhrsw           m3, m5
    VP9_STORE_2X       2, 3, 6, 7, 4
%endmacro
INIT_MMX ssse3
cglobal vp9_idct_idct_4x4_add, 4,4,0, dst, stride, block, eob

    cmp eobd, 4 ; 2x2 or smaller
    jg .idctfull

    cmp eobd, 1 ; faster path for when only DC is set
    jne .idct2x2
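; eob is the position just past the last nonzero coefficient in scan order:
; eob == 1 means DC-only, and the first four positions of the 4x4 scan all lie
; in the top-left 2x2 corner, hence the eob <= 4 threshold above.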
    movd               m0, [blockq]
    mova               m5, [pw_11585x2]
    pmulhrsw           m0, m5
    pmulhrsw           m0, m5
    pshufw             m0, m0, 0
    pxor               m4, m4
    movh               [blockq], m4
    pmulhrsw           m0, [pw_2048]                     ; (x*2048 + (1<<14))>>15 <=> (x+8)>>4
    VP9_STORE_2X       0, 0, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    VP9_STORE_2X       0, 0, 6, 7, 4
    RET
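; The DC-only path above scales dc by 11585/2^14 twice (once per 1-D pass),
; rounds with (x+8)>>4 via pw_2048, and adds the resulting constant residual
; to all 16 pixels.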
; faster path for when only the top-left 2x2 block is set
.idct2x2:
    movd               m0, [blockq+0]
    movd               m1, [blockq+8]
    mova               m5, [pw_11585x2]
    mova               m6, [pw_6270x2]
    mova               m7, [pw_15137x2]
    VP9_IDCT4_2x2_1D
    TRANSPOSE4x4W      0, 1, 2, 3, 4
    VP9_IDCT4_2x2_1D
    pxor               m4, m4                            ; used for the block reset, and VP9_STORE_2X
    movh               [blockq+ 0], m4
    movh               [blockq+ 8], m4
    VP9_IDCT4_WRITEOUT
    RET
.idctfull: ; generic full 4x4 idct/idct
    mova               m0, [blockq+ 0]
    mova               m1, [blockq+ 8]
    mova               m2, [blockq+16]
    mova               m3, [blockq+24]
    mova               m6, [pw_11585x2]
    mova               m7, [pd_8192]                     ; rounding
    VP9_IDCT4_1D
    TRANSPOSE4x4W      0, 1, 2, 3, 4
    VP9_IDCT4_1D
    pxor               m4, m4                            ; used for the block reset, and VP9_STORE_2X
    mova               [blockq+ 0], m4
    mova               [blockq+ 8], m4
    mova               [blockq+16], m4
    mova               [blockq+24], m4
    VP9_IDCT4_WRITEOUT
    RET
%if ARCH_X86_64 ; TODO: 32-bit? (32-bit is limited to 8 xmm registers; we use more)

;-------------------------------------------------------------------------------------------
; void vp9_idct_idct_8x8_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;-------------------------------------------------------------------------------------------
%macro VP9_IDCT8_1D_FINALIZE 0
    SUMSUB_BA          w,  3, 10, 4                      ; m3=t0+t7, m10=t0-t7
    SUMSUB_BA          w,  1,  2, 4                      ; m1=t1+t6, m2=t1-t6
    SUMSUB_BA          w, 11,  0, 4                      ; m11=t2+t5, m0=t2-t5
    SUMSUB_BA          w,  9,  8, 4                      ; m9=t3+t4, m8=t3-t4
    SWAP               11, 10, 2
    SWAP                3,  9, 0
%endmacro

%macro VP9_IDCT8_1D 0
    SUMSUB_BA          w,  8,  0, 4                      ; m8=IN(0)+IN(4) m0=IN(0)-IN(4)
    pmulhrsw           m8, m12                           ; m8=t0a
    pmulhrsw           m0, m12                           ; m0=t1a
    VP9_UNPACK_MULSUB_2W_4X 2, 10, 15137,  6270, m7, 4, 5 ; m2=t2a, m10=t3a
    VP9_UNPACK_MULSUB_2W_4X 1, 11, 16069,  3196, m7, 4, 5 ; m1=t4a, m11=t7a
    VP9_UNPACK_MULSUB_2W_4X 9,  3,  9102, 13623, m7, 4, 5 ; m9=t5a, m3=t6a
    SUMSUB_BA          w, 10,  8, 4                      ; m10=t0a+t3a (t0), m8=t0a-t3a (t3)
    SUMSUB_BA          w,  2,  0, 4                      ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    SUMSUB_BA          w,  9,  1, 4                      ; m9=t4a+t5a (t4), m1=t4a-t5a (t5a)
    SUMSUB_BA          w,  3, 11, 4                      ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
    SUMSUB_BA          w,  1, 11, 4                      ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
    pmulhrsw           m1, m12                           ; m1=t6
    pmulhrsw           m11, m12                          ; m11=t5
    VP9_IDCT8_1D_FINALIZE
%endmacro
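; The even half (IN 0/2/4/6) is the 4-point idct above; the odd half rotates
; (in1,in7) and (in5,in3) into t4a..t7a, folds them with two butterflies, and
; rescales the middle pair by 11585 (cos(pi/4)) to produce t5/t6 before the
; final add/sub stage in VP9_IDCT8_1D_FINALIZE.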
%macro VP9_IDCT8_4x4_1D 0
    pmulhrsw           m0, m12                           ; m0=t1a/t0a
    pmulhrsw           m10, m2, [pw_15137x2]             ; m10=t3a
    pmulhrsw           m2, [pw_6270x2]                   ; m2=t2a
    pmulhrsw           m11, m1, [pw_16069x2]             ; m11=t7a
    pmulhrsw           m1, [pw_3196x2]                   ; m1=t4a
    pmulhrsw           m9, m3, [pw_9102x2]               ; m9=-t5a
    pmulhrsw           m3, [pw_13623x2]                  ; m3=t6a
    psubw              m8, m0, m10                       ; m8=t0a-t3a (t3)
    paddw              m10, m0                           ; m10=t0a+t3a (t0)
    SUMSUB_BA          w,  2,  0, 4                      ; m2=t1a+t2a (t1), m0=t1a-t2a (t2)
    SUMSUB_BA          w,  9,  1, 4                      ; m1=t4a+t5a (t4), m9=t4a-t5a (t5a)
    SWAP                1,  9
    SUMSUB_BA          w,  3, 11, 4                      ; m3=t7a+t6a (t7), m11=t7a-t6a (t6a)
    SUMSUB_BA          w,  1, 11, 4                      ; m1=t6a+t5a (t6), m11=t6a-t5a (t5)
    pmulhrsw           m1, m12                           ; m1=t6
    pmulhrsw           m11, m12                          ; m11=t5
    VP9_IDCT8_1D_FINALIZE
%endmacro
; TODO: many of the t* copies can probably be removed and merged into the
; following SUMSUBs from VP9_IDCT8_1D_FINALIZE using AVX three-operand forms
%macro VP9_IDCT8_2x2_1D 0
    pmulhrsw           m0, m12                           ; m0=t0
    mova               m3, m1
    pmulhrsw           m1, m6                            ; m1=t4
    pmulhrsw           m3, m7                            ; m3=t7
    mova               m2, m0                            ; m2=t1
    mova               m10, m0                           ; m10=t2
    mova               m8, m0                            ; m8=t3
    mova               m11, m3                           ; t5 = t7a ...
    mova               m9, m3                            ; t6 = t7a ...
    psubw              m11, m1                           ; t5 = t7a - t4a
    paddw              m9, m1                            ; t6 = t7a + t4a
    pmulhrsw           m11, m12                          ; m11=t5
    pmulhrsw           m9, m12                           ; m9=t6
    SWAP                0, 10
    SWAP                9,  1
    VP9_IDCT8_1D_FINALIZE
%endmacro
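; As in the 4x4 2x2 case, only in0/in1 are nonzero here, so t0..t3 are all
; in0*11585 >> 14 and t4a/t7a are in1 scaled by 3196/16069; only the t5/t6
; rescale by 11585 still needs real arithmetic.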
%macro VP9_IDCT8_WRITEOUT 0
    mova               m5, [pw_1024]
    pmulhrsw           m0, m5                            ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    pmulhrsw           m1, m5
    VP9_STORE_2X       0, 1, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    pmulhrsw           m2, m5
    pmulhrsw           m3, m5
    VP9_STORE_2X       2, 3, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    pmulhrsw           m8, m5
    pmulhrsw           m9, m5
    VP9_STORE_2X       8, 9, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    pmulhrsw           m10, m5
    pmulhrsw           m11, m5
    VP9_STORE_2X       10, 11, 6, 7, 4
%endmacro
INIT_XMM ssse3
cglobal vp9_idct_idct_8x8_add, 4,4,13, dst, stride, block, eob

    mova               m12, [pw_11585x2]                 ; often used

    cmp eobd, 12 ; top left half or less
    jg .idctfull

    cmp eobd, 3 ; top left corner or less
    jg .idcthalf

    cmp eobd, 1 ; faster path for when only DC is set
    jne .idcttopleftcorner

    movd               m0, [blockq]
    pmulhrsw           m0, m12
    pmulhrsw           m0, m12
    SPLATW             m0, m0, 0
    pxor               m4, m4
    movd               [blockq], m4
    mova               m5, [pw_1024]
    pmulhrsw           m0, m5                            ; (x*1024 + (1<<14))>>15 <=> (x+16)>>5
    VP9_STORE_2X       0, 0, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    VP9_STORE_2X       0, 0, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    VP9_STORE_2X       0, 0, 6, 7, 4
    lea                dstq, [dstq+2*strideq]
    VP9_STORE_2X       0, 0, 6, 7, 4
    RET
; faster path for when only the top-left corner is set (3 inputs: DC, the
; coefficient to the right of DC, and the one below it). Note: this also
; works for a 2x2 block.
.idcttopleftcorner:
    movd               m0, [blockq+0]
    movd               m1, [blockq+16]
    mova               m6, [pw_3196x2]
    mova               m7, [pw_16069x2]
    VP9_IDCT8_2x2_1D
    TRANSPOSE8x8W      0, 1, 2, 3, 8, 9, 10, 11, 4
    VP9_IDCT8_2x2_1D
    pxor               m4, m4                            ; used for the block reset, and VP9_STORE_2X
    movd               [blockq+ 0], m4
    movd               [blockq+16], m4
    VP9_IDCT8_WRITEOUT
    RET
.idcthalf:
    movh               m0, [blockq + 0]
    movh               m1, [blockq +16]
    movh               m2, [blockq +32]
    movh               m3, [blockq +48]
    VP9_IDCT8_4x4_1D
    TRANSPOSE8x8W      0, 1, 2, 3, 8, 9, 10, 11, 4
    VP9_IDCT8_4x4_1D
    pxor               m4, m4
    movh               [blockq+ 0], m4
    movh               [blockq+16], m4
    movh               [blockq+32], m4
    movh               [blockq+48], m4
    VP9_IDCT8_WRITEOUT
    RET
.idctfull: ; generic full 8x8 idct/idct
    mova               m0, [blockq+  0]                  ; IN(0)
    mova               m1, [blockq+ 16]                  ; IN(1)
    mova               m2, [blockq+ 32]                  ; IN(2)
    mova               m3, [blockq+ 48]                  ; IN(3)
    mova               m8, [blockq+ 64]                  ; IN(4)
    mova               m9, [blockq+ 80]                  ; IN(5)
    mova               m10, [blockq+ 96]                 ; IN(6)
    mova               m11, [blockq+112]                 ; IN(7)
    mova               m7, [pd_8192]                     ; rounding
    VP9_IDCT8_1D
    TRANSPOSE8x8W      0, 1, 2, 3, 8, 9, 10, 11, 4
    VP9_IDCT8_1D
    pxor               m4, m4                            ; used for the block reset, and VP9_STORE_2X
    mova               [blockq+  0], m4
    mova               [blockq+ 16], m4
    mova               [blockq+ 32], m4
    mova               [blockq+ 48], m4
    mova               [blockq+ 64], m4
    mova               [blockq+ 80], m4
    mova               [blockq+ 96], m4
    mova               [blockq+112], m4
    VP9_IDCT8_WRITEOUT
    RET
;---------------------------------------------------------------------------------------------
; void vp9_idct_idct_16x16_add_<opt>(uint8_t *dst, ptrdiff_t stride, int16_t *block, int eob);
;---------------------------------------------------------------------------------------------

%macro VP9_IDCT16_1D 2 ; src, pass
    mova               m5, [%1+ 32]                      ; IN(1)
    mova               m14, [%1+ 64]                     ; IN(2)
    mova               m6, [%1+ 96]                      ; IN(3)
    mova               m9, [%1+128]                      ; IN(4)
    mova               m7, [%1+160]                      ; IN(5)
    mova               m15, [%1+192]                     ; IN(6)
    mova               m4, [%1+224]                      ; IN(7)
    mova               m3, [%1+288]                      ; IN(9)
    mova               m12, [%1+320]                     ; IN(10)
    mova               m0, [%1+352]                      ; IN(11)
    mova               m8, [%1+384]                      ; IN(12)
    mova               m1, [%1+416]                      ; IN(13)
    mova               m13, [%1+448]                     ; IN(14)
    mova               m2, [%1+480]                      ; IN(15)

    ; m10=in0, m5=in1, m14=in2, m6=in3, m9=in4, m7=in5, m15=in6, m4=in7
    ; m11=in8, m3=in9, m12=in10 m0=in11, m8=in12, m1=in13, m13=in14, m2=in15
    VP9_UNPACK_MULSUB_2W_4X  9,  8, 15137,  6270, [pd_8192], 10, 11 ; t2,  t3
    VP9_UNPACK_MULSUB_2W_4X 14, 13, 16069,  3196, [pd_8192], 10, 11 ; t4,  t7
    VP9_UNPACK_MULSUB_2W_4X 12, 15,  9102, 13623, [pd_8192], 10, 11 ; t5,  t6
    VP9_UNPACK_MULSUB_2W_4X  5,  2, 16305,  1606, [pd_8192], 10, 11 ; t8,  t15
    VP9_UNPACK_MULSUB_2W_4X  3,  4, 10394, 12665, [pd_8192], 10, 11 ; t9,  t14
    VP9_UNPACK_MULSUB_2W_4X  7,  0, 14449,  7723, [pd_8192], 10, 11 ; t10, t13
    VP9_UNPACK_MULSUB_2W_4X  1,  6,  4756, 15679, [pd_8192], 10, 11 ; t11, t12

    ; m11=t0, m10=t1, m9=t2, m8=t3, m14=t4, m12=t5, m15=t6, m13=t7
    ; m5=t8, m3=t9, m7=t10, m1=t11, m6=t12, m0=t13, m4=t14, m2=t15

    SUMSUB_BA          w, 12, 14, 10                     ; t4, t5
    SUMSUB_BA          w, 15, 13, 10                     ; t7, t6
    SUMSUB_BA          w,  3,  5, 10                     ; t8, t9
    SUMSUB_BA          w,  7,  1, 10                     ; t11, t10
    SUMSUB_BA          w,  0,  6, 10                     ; t12, t13
    SUMSUB_BA          w,  4,  2, 10                     ; t15, t14

    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m14=t5, m13=t6, m15=t7
    ; m3=t8, m5=t9, m1=t10, m7=t11, m0=t12, m6=t13, m2=t14, m4=t15

    SUMSUB_BA          w, 14, 13, 10
    pmulhrsw           m13, [pw_11585x2]                 ; t5
    pmulhrsw           m14, [pw_11585x2]                 ; t6
    VP9_UNPACK_MULSUB_2W_4X  2,  5, 15137,   6270, [pd_8192], 10, 11 ; t9,  t14
    VP9_UNPACK_MULSUB_2W_4X  6,  1,  6270, m15137, [pd_8192], 10, 11 ; t10, t13

    ; m8=t0, m9=t1, m10=t2, m11=t3, m12=t4, m13=t5, m14=t6, m15=t7
    ; m3=t8, m2=t9, m6=t10, m7=t11, m0=t12, m1=t13, m5=t14, m4=t15

    SUMSUB_BA          w,  7,  3, 10                     ; t8, t11
    SUMSUB_BA          w,  6,  2, 10                     ; t9, t10
    SUMSUB_BA          w,  0,  4, 10                     ; t15, t12
    SUMSUB_BA          w,  1,  5, 10                     ; t14, t13
    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m2=t10, m3=t11, m4=t12, m5=t13, m1=t14, m0=t15

    SUMSUB_BA          w,  2,  5, 10
    SUMSUB_BA          w,  3,  4, 10
    pmulhrsw           m5, [pw_11585x2]                  ; t10
    pmulhrsw           m4, [pw_11585x2]                  ; t11
    pmulhrsw           m3, [pw_11585x2]                  ; t12
    pmulhrsw           m2, [pw_11585x2]                  ; t13

    ; backup first register
    mova               [rsp+32], m7

    ; m15=t0, m14=t1, m13=t2, m12=t3, m11=t4, m10=t5, m9=t6, m8=t7
    ; m7=t8, m6=t9, m5=t10, m4=t11, m3=t12, m2=t13, m1=t14, m0=t15

    ; from load/start
    mova               m10, [%1+  0]                     ; IN(0)
    mova               m11, [%1+256]                     ; IN(8)

    ; from 3 stages back
    SUMSUB_BA          w, 11, 10, 7
    pmulhrsw           m11, [pw_11585x2]                 ; t0
    pmulhrsw           m10, [pw_11585x2]                 ; t1

    ; from 2 stages back
    SUMSUB_BA          w,  8, 11, 7                      ; t0, t3
    SUMSUB_BA          w,  9, 10, 7                      ; t1, t2

    ; from 1 stage back
    SUMSUB_BA          w, 15,  8, 7                      ; t0, t7
    SUMSUB_BA          w, 14,  9, 7                      ; t1, t6
    SUMSUB_BA          w, 13, 10, 7                      ; t2, t5
    SUMSUB_BA          w, 12, 11, 7                      ; t3, t4
    SUMSUB_BA          w,  0, 15, 7                      ; t0, t15
    SUMSUB_BA          w,  1, 14, 7                      ; t1, t14
    SUMSUB_BA          w,  2, 13, 7                      ; t2, t13
    SUMSUB_BA          w,  3, 12, 7                      ; t3, t12
    SUMSUB_BA          w,  4, 11, 7                      ; t4, t11
    SUMSUB_BA          w,  5, 10, 7                      ; t5, t10
%if %2 == 1
    ; backup a different register
    mova               [rsp+16], m15
    mova               m7, [rsp+32]
    SUMSUB_BA          w,  6,  9, 15                     ; t6, t9
    SUMSUB_BA          w,  7,  8, 15                     ; t7, t8
    TRANSPOSE8x8W      0, 1, 2, 3, 4, 5, 6, 7, 15
    mova               [rsp+  0], m0
    mova               [rsp+ 32], m1
    mova               [rsp+ 64], m2
    mova               [rsp+ 96], m3
    mova               [rsp+128], m4
    mova               [rsp+160], m5
    mova               [rsp+192], m6
    mova               [rsp+224], m7
    mova               m15, [rsp+16]
    TRANSPOSE8x8W      8, 9, 10, 11, 12, 13, 14, 15, 0
    mova               [rsp+ 16], m8
    mova               [rsp+ 48], m9
    mova               [rsp+ 80], m10
    mova               [rsp+112], m11
    mova               [rsp+144], m12
    mova               [rsp+176], m13
    mova               [rsp+208], m14
    mova               [rsp+240], m15
%else ; %2 == 2
    ; backup more registers
    mova               [rsp+64], m8
    mova               [rsp+96], m9

    pxor               m7, m7
    pmulhrsw           m0, [pw_512]
    pmulhrsw           m1, [pw_512]
    VP9_STORE_2X       0, 1, 8, 9, 7
    lea                dstq, [dstq+strideq*2]
    pmulhrsw           m2, [pw_512]
    pmulhrsw           m3, [pw_512]
    VP9_STORE_2X       2, 3, 8, 9, 7
    lea                dstq, [dstq+strideq*2]
    pmulhrsw           m4, [pw_512]
    pmulhrsw           m5, [pw_512]
    VP9_STORE_2X       4, 5, 8, 9, 7
    lea                dstq, [dstq+strideq*2]

    ; restore from cache
    SWAP                0, 7                             ; move zero from m7 to m0
    mova               m7, [rsp+32]
    mova               m8, [rsp+64]
    mova               m9, [rsp+96]

    SUMSUB_BA          w,  6,  9, 1                      ; t6, t9
    SUMSUB_BA          w,  7,  8, 1                      ; t7, t8

    pmulhrsw           m6, [pw_512]
    pmulhrsw           m7, [pw_512]
    VP9_STORE_2X       6, 7, 1, 2, 0
    lea                dstq, [dstq+strideq*2]
    pmulhrsw           m8, [pw_512]
    pmulhrsw           m9, [pw_512]
    VP9_STORE_2X       8, 9, 1, 2, 0
    lea                dstq, [dstq+strideq*2]
    pmulhrsw           m10, [pw_512]
    pmulhrsw           m11, [pw_512]
    VP9_STORE_2X       10, 11, 1, 2, 0
    lea                dstq, [dstq+strideq*2]
    pmulhrsw           m12, [pw_512]
    pmulhrsw           m13, [pw_512]
    VP9_STORE_2X       12, 13, 1, 2, 0
    lea                dstq, [dstq+strideq*2]
    pmulhrsw           m14, [pw_512]
    pmulhrsw           m15, [pw_512]
    VP9_STORE_2X       14, 15, 1, 2, 0
%endif ; %2 == 1/2
%endmacro
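; Pass 1 (%2 == 1) transposes the two 8x16 result halves into the caller's
; stack buffer; pass 2 (%2 == 2) reads that buffer back and stores the final
; pixels, rounded with pw_512: (x*512 + (1<<14))>>15 <=> (x+32)>>6.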
%macro ZERO_BLOCK 3 ; mem, n_bytes, zero_reg
%assign %%off 0
%rep %2/mmsize
    mova               [%1+%%off], %3
%assign %%off (%%off+mmsize)
%endrep
%endmacro
%macro VP9_STORE_2XFULL 6 ; dc, tmp1, tmp2, tmp3, tmp4, zero
    mova               m%3, [dstq]
    mova               m%5, [dstq+strideq]
    punpcklbw          m%2, m%3, m%6
    punpckhbw          m%3, m%6
    punpcklbw          m%4, m%5, m%6
    punpckhbw          m%5, m%6
    paddw              m%2, m%1
    paddw              m%3, m%1
    paddw              m%4, m%1
    paddw              m%5, m%1
    packuswb           m%2, m%3
    packuswb           m%4, m%5
    mova               [dstq], m%2
    mova               [dstq+strideq], m%4
%endmacro
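; Full-width (16-pixel) variant of VP9_STORE_2X: both byte halves of each row
; are widened, offset by the splatted dc residual in m%1, and repacked with
; unsigned saturation; used by the dc-only path below.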
INIT_XMM ssse3
cglobal vp9_idct_idct_16x16_add, 4, 5, 16, 512, dst, stride, block, eob
    cmp eobd, 1 ; faster path for when only DC is set
    jne .idctfull

    ; dc-only
    movd               m0, [blockq]
    mova               m1, [pw_11585x2]
    pmulhrsw           m0, m1
    pmulhrsw           m0, m1
    SPLATW             m0, m0, q0000
    pmulhrsw           m0, [pw_512]
    pxor               m5, m5
    movd               [blockq], m5
%rep 7
    VP9_STORE_2XFULL   0, 1, 2, 3, 4, 5
    lea                dstq, [dstq+2*strideq]
%endrep
    VP9_STORE_2XFULL   0, 1, 2, 3, 4, 5
    RET
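; Full 16x16: pass 1 runs VP9_IDCT16_1D over each 8-column half of the input,
; accumulating transposed intermediates in the 512-byte stack buffer; pass 2
; then runs it over the buffer rows, writing 8 output columns per iteration.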
.idctfull:
    DEFINE_ARGS dst, stride, block, cnt, dst_bak
    mov                cntd, 2
.loop1_full:
    VP9_IDCT16_1D      blockq, 1
    add                blockq, 16
    add                rsp, 256
    dec                cntd
    jg .loop1_full
    sub                blockq, 32
    sub                rsp, 512

    mov                cntd, 2
    mov                dst_bakq, dstq
.loop2_full:
    VP9_IDCT16_1D      rsp, 2
    lea                dstq, [dst_bakq+8]
    add                rsp, 16
    dec                cntd
    jg .loop2_full
    sub                rsp, 32

    ; at the end of the loop, m0 should still be zero
    ; use that to zero out block coefficients
    ZERO_BLOCK         blockq, 512, m0
    RET
%endif ; x86-64