;******************************************************************************
;* MMX/SSE2-optimized functions for the VP3 decoder
;* Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

; MMX-optimized functions cribbed from the original VP3 source code.

SECTION_RODATA
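
; The IDCT constants below are cos(n*pi/16) for n = 1..7, scaled by 65536
; (cos(pi/16)*65536 rounds to 64277, down to cos(7*pi/16)*65536 = 12785).
; Entries above 0x7fff read as negative in pmulhw's signed multiply, so
; "pmulhw x, C(n)" for those really computes (c*x >> 16) - x; the IDCT
; macros below compensate by adding x back in, hence comment pairs like
; "c3*i3 - i3" followed by "c3*i3".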
vp3_idct_data: times 8 dw 64277
               times 8 dw 60547
               times 8 dw 54491
               times 8 dw 46341
               times 8 dw 36410
               times 8 dw 25080
               times 8 dw 12785
pb_7:  times 8 db 0x07
pb_1F: times 8 db 0x1f
pb_81: times 8 db 0x81

cextern pb_1
cextern pb_3
cextern pb_80
cextern pw_8

SECTION .text
; this is off by one or two for some cases when filter_limit is greater than 63
; in: p0 in mm6, p1 in mm4, p2 in mm2, p3 in mm1
; out: p1 in mm4, p2 in mm3
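;
; As a rough scalar sketch of what the macro computes per pixel (modelled on
; the C reference loop filter; the variable names here are ours, not this
; file's):
;     f  = (p0 - p3) + 3 * (p2 - p1)
;     f  = bound((f + 4) >> 3)         ; |f| limited by flim = [r2+516]
;     p1 = clip_uint8(p1 + f)
;     p2 = clip_uint8(p2 - f)
; The pavgb/pxor sequence below evaluates this entirely in bytes; the "extra
; bits" bookkeeping recovers precision that pavgb's shifts would drop, and
; the pminub/psubb block approximates the bounding function (hence the
; off-by-one note above).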
%macro VP3_LOOP_FILTER 0
    movq          m7, m6
    pand          m6, [pb_7]    ; p0&7
    psrlw         m7, 3
    pand          m7, [pb_1F]   ; p0>>3
    movq          m3, m2        ; p2
    pxor          m2, m4
    pand          m2, [pb_1]    ; (p2^p1)&1
    movq          m5, m2
    paddb         m2, m2
    paddb         m2, m5        ; 3*(p2^p1)&1
    paddb         m2, m6        ; extra bits lost in shifts
    pcmpeqb       m0, m0
    pxor          m1, m0        ; 255 - p3
    pavgb         m1, m2        ; (256 - p3 + extrabits) >> 1
    pxor          m0, m4        ; 255 - p1
    pavgb         m0, m3        ; (256 + p2-p1) >> 1
    paddb         m1, [pb_3]
    pavgb         m1, m0        ; 128+2+(  p2-p1  - p3) >> 2
    pavgb         m1, m0        ; 128+1+(3*(p2-p1) - p3) >> 3
    paddusb       m7, m1        ; d+128+1
    movq          m6, [pb_81]
    psubusb       m6, m7
    psubusb       m7, [pb_81]
    movq          m5, [r2+516]  ; flim
    pminub        m6, m5
    pminub        m7, m5
    movq          m0, m6
    movq          m1, m7
    paddb         m6, m6
    paddb         m7, m7
    pminub        m6, m5
    pminub        m7, m5
    psubb         m6, m0
    psubb         m7, m1
    paddusb       m4, m7
    psubusb       m4, m6
    psubusb       m3, m7
    paddusb       m3, m6
%endmacro
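
; Scatter the four filtered (p1,p2) word pairs packed in %1 back to the two
; pixel columns at x = -1, 0 of four consecutive rows (r1 = stride, r3 must
; hold 3*stride; r2 is clobbered as scratch).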
%macro STORE_4_WORDS 1
    movd         r2d, %1
    mov  [r0     -1], r2w
    psrlq         %1, 32
    shr           r2, 16
    mov  [r0+r1  -1], r2w
    movd         r2d, %1
    mov  [r0+r1*2-1], r2w
    shr           r2, 16
    mov  [r0+r3  -1], r2w
%endmacro
INIT_MMX mmxext
cglobal vp3_v_loop_filter, 3, 4
%if ARCH_X86_64
    movsxd        r1, r1d
%endif
    mov           r3, r1
    neg           r1
    movq          m6, [r0+r1*2]
    movq          m4, [r0+r1  ]
    movq          m2, [r0     ]
    movq          m1, [r0+r3  ]
    VP3_LOOP_FILTER
    movq     [r0+r1], m4
    movq     [r0   ], m3
    RET

cglobal vp3_h_loop_filter, 3, 4
%if ARCH_X86_64
    movsxd        r1, r1d
%endif
    lea           r3, [r1*3]
    movd          m6, [r0     -2]
    movd          m4, [r0+r1  -2]
    movd          m2, [r0+r1*2-2]
    movd          m1, [r0+r3  -2]
    lea           r0, [r0+r1*4]
    punpcklbw     m6, [r0     -2]
    punpcklbw     m4, [r0+r1  -2]
    punpcklbw     m2, [r0+r1*2-2]
    punpcklbw     m1, [r0+r3  -2]
    sub           r0, r3
    sub           r0, r1
    TRANSPOSE4x4B 6, 4, 2, 1, 0
    VP3_LOOP_FILTER
    SBUTTERFLY    bw, 4, 3, 5
    STORE_4_WORDS m4
    lea           r0, [r0+r1*4]
    STORE_4_WORDS m3
    RET

; from original comments: The Macro does IDct on 4 1-D Dcts
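;
; Shorthand used in the comments below (collected from the per-line
; annotations):
;   A = c1*i1 + c7*i7     E = c4*(i0 + i4)
;   B = c7*i1 - c1*i7     F = c4*(i0 - i4)
;   C = c3*i3 + c5*i5     G = c2*i2 + c6*i6
;   D = c3*i5 - c5*i3     H = c6*i2 - c2*i6
; A trailing dot marks a value after a further butterfly step (A., B., ...).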
%macro BeginIDCT 0
    movq          m2, I(3)
    movq          m6, C(3)
    movq          m4, m2
    movq          m7, J(5)
    pmulhw        m4, m6        ; r4 = c3*i3 - i3
    movq          m1, C(5)
    pmulhw        m6, m7        ; r6 = c3*i5 - i5
    movq          m5, m1
    pmulhw        m1, m2        ; r1 = c5*i3 - i3
    movq          m3, I(1)
    pmulhw        m5, m7        ; r5 = c5*i5 - i5
    movq          m0, C(1)
    paddw         m4, m2        ; r4 = c3*i3
    paddw         m6, m7        ; r6 = c3*i5
    paddw         m2, m1        ; r2 = c5*i3
    movq          m1, J(7)
    paddw         m7, m5        ; r7 = c5*i5
    movq          m5, m0        ; r5 = c1
    pmulhw        m0, m3        ; r0 = c1*i1 - i1
    paddsw        m4, m7        ; r4 = C = c3*i3 + c5*i5
    pmulhw        m5, m1        ; r5 = c1*i7 - i7
    movq          m7, C(7)
    psubsw        m6, m2        ; r6 = D = c3*i5 - c5*i3
    paddw         m0, m3        ; r0 = c1*i1
    pmulhw        m3, m7        ; r3 = c7*i1
    movq          m2, I(2)
    pmulhw        m7, m1        ; r7 = c7*i7
    paddw         m5, m1        ; r5 = c1*i7
    movq          m1, m2        ; r1 = i2
    pmulhw        m2, C(2)      ; r2 = c2*i2 - i2
    psubsw        m3, m5        ; r3 = B = c7*i1 - c1*i7
    movq          m5, J(6)
    paddsw        m0, m7        ; r0 = A = c1*i1 + c7*i7
    movq          m7, m5        ; r7 = i6
    psubsw        m0, m4        ; r0 = A - C
    pmulhw        m5, C(2)      ; r5 = c2*i6 - i6
    paddw         m2, m1        ; r2 = c2*i2
    pmulhw        m1, C(6)      ; r1 = c6*i2
    paddsw        m4, m4        ; r4 = C + C
    paddsw        m4, m0        ; r4 = C. = A + C
    psubsw        m3, m6        ; r3 = B - D
    paddw         m5, m7        ; r5 = c2*i6
    paddsw        m6, m6        ; r6 = D + D
    pmulhw        m7, C(6)      ; r7 = c6*i6
    paddsw        m6, m3        ; r6 = D. = B + D
    movq        I(1), m4        ; save C. at I(1)
    psubsw        m1, m5        ; r1 = H = c6*i2 - c2*i6
    movq          m4, C(4)
    movq          m5, m3        ; r5 = B - D
    pmulhw        m3, m4        ; r3 = (c4 - 1) * (B - D)
    paddsw        m7, m2        ; r7 = G = c2*i2 + c6*i6
    movq        I(2), m6        ; save D. at I(2)
    movq          m2, m0        ; r2 = A - C
    movq          m6, I(0)
    pmulhw        m0, m4        ; r0 = (c4 - 1) * (A - C)
    paddw         m5, m3        ; r5 = B. = c4 * (B - D)
    movq          m3, J(4)
    psubsw        m5, m1        ; r5 = B.. = B. - H
    paddw         m2, m0        ; r2 = A. = c4 * (A - C)
    psubsw        m6, m3        ; r6 = i0 - i4
    movq          m0, m6
    pmulhw        m6, m4        ; r6 = (c4 - 1) * (i0 - i4)
    paddsw        m3, m3        ; r3 = i4 + i4
    paddsw        m1, m1        ; r1 = H + H
    paddsw        m3, m0        ; r3 = i0 + i4
    paddsw        m1, m5        ; r1 = H. = B. + H
    pmulhw        m4, m3        ; r4 = (c4 - 1) * (i0 + i4)
    paddsw        m6, m0        ; r6 = F = c4 * (i0 - i4)
    psubsw        m6, m2        ; r6 = F. = F - A.
    paddsw        m2, m2        ; r2 = A. + A.
    movq          m0, I(1)      ; r0 = C.
    paddsw        m2, m6        ; r2 = A.. = F + A.
    paddw         m4, m3        ; r4 = E = c4 * (i0 + i4)
    psubsw        m2, m1        ; r2 = R2 = A.. - H.
%endmacro

; RowIDCT gets ready to transpose
%macro RowIDCT 0
    BeginIDCT
    movq          m3, I(2)      ; r3 = D.
    psubsw        m4, m7        ; r4 = E. = E - G
    paddsw        m1, m1        ; r1 = H. + H.
    paddsw        m7, m7        ; r7 = G + G
    paddsw        m1, m2        ; r1 = R1 = A.. + H.
    paddsw        m7, m4        ; r7 = G. = E + G
    psubsw        m4, m3        ; r4 = R4 = E. - D.
    paddsw        m3, m3
    psubsw        m6, m5        ; r6 = R6 = F. - B..
    paddsw        m5, m5
    paddsw        m3, m4        ; r3 = R3 = E. + D.
    paddsw        m5, m6        ; r5 = R5 = F. + B..
    psubsw        m7, m0        ; r7 = R7 = G. - C.
    paddsw        m0, m0
    movq        I(1), m1        ; save R1
    paddsw        m0, m7        ; r0 = R0 = G. + C.
%endmacro

; Column IDCT normalizes and stores final results
%macro ColumnIDCT 0
    BeginIDCT
    paddsw        m2, OC_8      ; adjust R2 (and R1) for shift
    paddsw        m1, m1        ; r1 = H. + H.
    paddsw        m1, m2        ; r1 = R1 = A.. + H.
    psraw         m2, 4         ; r2 = NR2
    psubsw        m4, m7        ; r4 = E. = E - G
    psraw         m1, 4         ; r1 = NR1
    movq          m3, I(2)      ; r3 = D.
    paddsw        m7, m7        ; r7 = G + G
    movq        I(2), m2        ; store NR2 at I2
    paddsw        m7, m4        ; r7 = G. = E + G
    movq        I(1), m1        ; store NR1 at I1
    psubsw        m4, m3        ; r4 = R4 = E. - D.
    paddsw        m4, OC_8      ; adjust R4 (and R3) for shift
    paddsw        m3, m3        ; r3 = D. + D.
    paddsw        m3, m4        ; r3 = R3 = E. + D.
    psraw         m4, 4         ; r4 = NR4
    psubsw        m6, m5        ; r6 = R6 = F. - B..
    psraw         m3, 4         ; r3 = NR3
    paddsw        m6, OC_8      ; adjust R6 (and R5) for shift
    paddsw        m5, m5        ; r5 = B.. + B..
    paddsw        m5, m6        ; r5 = R5 = F. + B..
    psraw         m6, 4         ; r6 = NR6
    movq        J(4), m4        ; store NR4 at J4
    psraw         m5, 4         ; r5 = NR5
    movq        I(3), m3        ; store NR3 at I3
    psubsw        m7, m0        ; r7 = R7 = G. - C.
    paddsw        m7, OC_8      ; adjust R7 (and R0) for shift
    paddsw        m0, m0        ; r0 = C. + C.
    paddsw        m0, m7        ; r0 = R0 = G. + C.
    psraw         m7, 4         ; r7 = NR7
    movq        J(6), m6        ; store NR6 at J6
    psraw         m0, 4         ; r0 = NR0
    movq        J(5), m5        ; store NR5 at J5
    movq        J(7), m7        ; store NR7 at J7
    movq        I(0), m0        ; store NR0 at I0
%endmacro

; Following macro does two 4x4 transposes in place.
;
; At entry (we assume):
;
;   r0 = a3 a2 a1 a0
; I(1) = b3 b2 b1 b0
;   r2 = c3 c2 c1 c0
;   r3 = d3 d2 d1 d0
;
;   r4 = e3 e2 e1 e0
;   r5 = f3 f2 f1 f0
;   r6 = g3 g2 g1 g0
;   r7 = h3 h2 h1 h0
;
; At exit, we have:
;
; I(0) = d0 c0 b0 a0
; I(1) = d1 c1 b1 a1
; I(2) = d2 c2 b2 a2
; I(3) = d3 c3 b3 a3
;
; J(4) = h0 g0 f0 e0
; J(5) = h1 g1 f1 e1
; J(6) = h2 g2 f2 e2
; J(7) = h3 g3 f3 e3
;
; I(0) I(1) I(2) I(3) is the transpose of r0 I(1) r2 r3.
; J(4) J(5) J(6) J(7) is the transpose of r4 r5 r6 r7.
;
; Since r1 is free at entry, we calculate the Js first.
%macro Transpose 0
    movq          m1, m4        ; r1 = e3 e2 e1 e0
    punpcklwd     m4, m5        ; r4 = f1 e1 f0 e0
    movq        I(0), m0        ; save a3 a2 a1 a0
    punpckhwd     m1, m5        ; r1 = f3 e3 f2 e2
    movq          m0, m6        ; r0 = g3 g2 g1 g0
    punpcklwd     m6, m7        ; r6 = h1 g1 h0 g0
    movq          m5, m4        ; r5 = f1 e1 f0 e0
    punpckldq     m4, m6        ; r4 = h0 g0 f0 e0 = R4
    punpckhdq     m5, m6        ; r5 = h1 g1 f1 e1 = R5
    movq          m6, m1        ; r6 = f3 e3 f2 e2
    movq        J(4), m4
    punpckhwd     m0, m7        ; r0 = h3 g3 h2 g2
    movq        J(5), m5
    punpckhdq     m6, m0        ; r6 = h3 g3 f3 e3 = R7
    movq          m4, I(0)      ; r4 = a3 a2 a1 a0
    punpckldq     m1, m0        ; r1 = h2 g2 f2 e2 = R6
    movq          m5, I(1)      ; r5 = b3 b2 b1 b0
    movq          m0, m4        ; r0 = a3 a2 a1 a0
    movq        J(7), m6
    punpcklwd     m0, m5        ; r0 = b1 a1 b0 a0
    movq        J(6), m1
    punpckhwd     m4, m5        ; r4 = b3 a3 b2 a2
    movq          m5, m2        ; r5 = c3 c2 c1 c0
    punpcklwd     m2, m3        ; r2 = d1 c1 d0 c0
    movq          m1, m0        ; r1 = b1 a1 b0 a0
    punpckldq     m0, m2        ; r0 = d0 c0 b0 a0 = R0
    punpckhdq     m1, m2        ; r1 = d1 c1 b1 a1 = R1
    movq          m2, m4        ; r2 = b3 a3 b2 a2
    movq        I(0), m0
    punpckhwd     m5, m3        ; r5 = d3 c3 d2 c2
    movq        I(1), m1
    punpckhdq     m4, m5        ; r4 = d3 c3 b3 a3 = R3
    punpckldq     m2, m5        ; r2 = d2 c2 b2 a2 = R2
    movq        I(3), m4
    movq        I(2), m2
%endmacro

%macro VP3_1D_IDCT_SSE2 0
    movdqa        m2, I(3)      ; xmm2 = i3
    movdqa        m6, C(3)      ; xmm6 = c3
    movdqa        m4, m2        ; xmm4 = i3
    movdqa        m7, I(5)      ; xmm7 = i5
    pmulhw        m4, m6        ; xmm4 = c3 * i3 - i3
    movdqa        m1, C(5)      ; xmm1 = c5
    pmulhw        m6, m7        ; xmm6 = c3 * i5 - i5
    movdqa        m5, m1        ; xmm5 = c5
    pmulhw        m1, m2        ; xmm1 = c5 * i3 - i3
    movdqa        m3, I(1)      ; xmm3 = i1
    pmulhw        m5, m7        ; xmm5 = c5 * i5 - i5
    movdqa        m0, C(1)      ; xmm0 = c1
    paddw         m4, m2        ; xmm4 = c3 * i3
    paddw         m6, m7        ; xmm6 = c3 * i5
    paddw         m2, m1        ; xmm2 = c5 * i3
    movdqa        m1, I(7)      ; xmm1 = i7
    paddw         m7, m5        ; xmm7 = c5 * i5
    movdqa        m5, m0        ; xmm5 = c1
    pmulhw        m0, m3        ; xmm0 = c1 * i1 - i1
    paddsw        m4, m7        ; xmm4 = c3 * i3 + c5 * i5 = C
    pmulhw        m5, m1        ; xmm5 = c1 * i7 - i7
    movdqa        m7, C(7)      ; xmm7 = c7
    psubsw        m6, m2        ; xmm6 = c3 * i5 - c5 * i3 = D
    paddw         m0, m3        ; xmm0 = c1 * i1
    pmulhw        m3, m7        ; xmm3 = c7 * i1
    movdqa        m2, I(2)      ; xmm2 = i2
    pmulhw        m7, m1        ; xmm7 = c7 * i7
    paddw         m5, m1        ; xmm5 = c1 * i7
    movdqa        m1, m2        ; xmm1 = i2
    pmulhw        m2, C(2)      ; xmm2 = i2 * c2 - i2
    psubsw        m3, m5        ; xmm3 = c7 * i1 - c1 * i7 = B
    movdqa        m5, I(6)      ; xmm5 = i6
    paddsw        m0, m7        ; xmm0 = c1 * i1 + c7 * i7 = A
    movdqa        m7, m5        ; xmm7 = i6
    psubsw        m0, m4        ; xmm0 = A - C
    pmulhw        m5, C(2)      ; xmm5 = c2 * i6 - i6
    paddw         m2, m1        ; xmm2 = i2 * c2
    pmulhw        m1, C(6)      ; xmm1 = c6 * i2
    paddsw        m4, m4        ; xmm4 = C + C
    paddsw        m4, m0        ; xmm4 = A + C = C.
    psubsw        m3, m6        ; xmm3 = B - D
    paddw         m5, m7        ; xmm5 = c2 * i6
    paddsw        m6, m6        ; xmm6 = D + D
    pmulhw        m7, C(6)      ; xmm7 = c6 * i6
    paddsw        m6, m3        ; xmm6 = B + D = D.
    movdqa      I(1), m4        ; save C. at I(1)
    psubsw        m1, m5        ; xmm1 = c6 * i2 - c2 * i6 = H
    movdqa        m4, C(4)      ; xmm4 = c4
    movdqa        m5, m3        ; xmm5 = B - D
    pmulhw        m3, m4        ; xmm3 = (c4 - 1) * (B - D)
    paddsw        m7, m2        ; xmm7 = c2 * i2 + c6 * i6 = G
    movdqa      I(2), m6        ; save D. at I(2)
    movdqa        m2, m0        ; xmm2 = A - C
    movdqa        m6, I(0)      ; xmm6 = i0
    pmulhw        m0, m4        ; xmm0 = (c4 - 1) * (A - C)
    paddw         m5, m3        ; xmm5 = c4 * (B - D) = B.
    movdqa        m3, I(4)      ; xmm3 = i4
    psubsw        m5, m1        ; xmm5 = B. - H = B..
    paddw         m2, m0        ; xmm2 = c4 * (A - C) = A.
    psubsw        m6, m3        ; xmm6 = i0 - i4
    movdqa        m0, m6        ; xmm0 = i0 - i4
    pmulhw        m6, m4        ; xmm6 = (c4 - 1) * (i0 - i4)
    paddsw        m3, m3        ; xmm3 = i4 + i4
    paddsw        m1, m1        ; xmm1 = H + H
    paddsw        m3, m0        ; xmm3 = i0 + i4
    paddsw        m1, m5        ; xmm1 = B. + H = H.
    pmulhw        m4, m3        ; xmm4 = (c4 - 1) * (i0 + i4)
    paddw         m6, m0        ; xmm6 = c4 * (i0 - i4) = F
    psubsw        m6, m2        ; xmm6 = F - A. = F.
    paddsw        m2, m2        ; xmm2 = A. + A.
    movdqa        m0, I(1)      ; load C. from I(1)
    paddsw        m2, m6        ; xmm2 = F + A. = A..
    paddw         m4, m3        ; xmm4 = c4 * (i0 + i4) = E
    psubsw        m2, m1        ; xmm2 = A.. - H. = R2
    ADD(m2)                     ; adjust R2 and R1 before shifting
    paddsw        m1, m1        ; xmm1 = H. + H.
    paddsw        m1, m2        ; xmm1 = A.. + H. = R1
    SHIFT(m2)                   ; xmm2 = op2
    psubsw        m4, m7        ; xmm4 = E - G = E.
    SHIFT(m1)                   ; xmm1 = op1
    movdqa        m3, I(2)      ; load D. from I(2)
    paddsw        m7, m7        ; xmm7 = G + G
    paddsw        m7, m4        ; xmm7 = E + G = G.
    psubsw        m4, m3        ; xmm4 = E. - D. = R4
    ADD(m4)                     ; adjust R4 and R3 before shifting
    paddsw        m3, m3        ; xmm3 = D. + D.
    paddsw        m3, m4        ; xmm3 = E. + D. = R3
    SHIFT(m4)                   ; xmm4 = op4
    psubsw        m6, m5        ; xmm6 = F. - B.. = R6
    SHIFT(m3)                   ; xmm3 = op3
    ADD(m6)                     ; adjust R6 and R5 before shifting
    paddsw        m5, m5        ; xmm5 = B.. + B..
    paddsw        m5, m6        ; xmm5 = F. + B.. = R5
    SHIFT(m6)                   ; xmm6 = op6
    SHIFT(m5)                   ; xmm5 = op5
    psubsw        m7, m0        ; xmm7 = G. - C. = R7
    ADD(m7)                     ; adjust R7 and R0 before shifting
    paddsw        m0, m0        ; xmm0 = C. + C.
    paddsw        m0, m7        ; xmm0 = G. + C. = R0
    SHIFT(m7)                   ; xmm7 = op7
    SHIFT(m0)                   ; xmm0 = op0
%endmacro

%macro PUT_BLOCK 8
    movdqa      O(0), m%1
    movdqa      O(1), m%2
    movdqa      O(2), m%3
    movdqa      O(3), m%4
    movdqa      O(4), m%5
    movdqa      O(5), m%6
    movdqa      O(6), m%7
    movdqa      O(7), m%8
%endmacro
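
; Full 8x8 IDCT on the block at %1.  The SSE2 path runs VP3_1D_IDCT_SSE2 on
; the rows with ADD/SHIFT stubbed out, transposes, then reruns it on the
; columns with ADD = +8 and SHIFT = >>4 to round the final output.  The MMX
; path does the same work in 4x4 quarters: two RowIDCT+Transpose passes
; followed by two ColumnIDCTs, which normalize and store their own results.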
%macro VP3_IDCT 1
%if mmsize == 16
%define I(x) [%1+16*x]
%define O(x) [%1+16*x]
%define C(x) [vp3_idct_data+16*(x-1)]
%define SHIFT(x)
%define ADD(x)
    VP3_1D_IDCT_SSE2
%if ARCH_X86_64
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%1], [%1+16]
%endif
    PUT_BLOCK 0, 1, 2, 3, 4, 5, 6, 7

%define SHIFT(x) psraw  x, 4
%define ADD(x)   paddsw x, [pw_8]
    VP3_1D_IDCT_SSE2
    PUT_BLOCK 0, 1, 2, 3, 4, 5, 6, 7
%else ; mmsize == 8
    ; register notes carried over from the original VP3 source:
    ; eax = quantized input
    ; ebx = dequantizer matrix
    ; ecx = IDCT constants
    ;   M(I) = ecx + MaskOffset(0) + I * 8
    ;   C(I) = ecx + CosineOffset(32) + (I-1) * 8
    ; edx = output
    ; r0..r7 = mm0..mm7
%define OC_8 [pw_8]
%define C(x) [vp3_idct_data+16*(x-1)]

    ; at this point, function has completed dequantization + dezigzag +
    ; partial transposition; now do the idct itself
%define I(x) [%1+16*x]
%define J(x) [%1+16*x]
    RowIDCT
    Transpose

%define I(x) [%1+16*x+8]
%define J(x) [%1+16*x+8]
    RowIDCT
    Transpose

%define I(x) [%1+16* x     ]
%define J(x) [%1+16*(x-4)+8]
    ColumnIDCT

%define I(x) [%1+16* x   +64]
%define J(x) [%1+16*(x-4)+72]
    ColumnIDCT
%endif ; mmsize == 16/8
%endmacro

%macro vp3_idct_funcs 0
cglobal vp3_idct_put, 3, 4, 9
    VP3_IDCT      r2

    movsxdifnidn  r1, r1d
    mova          m4, [pb_80]
    lea           r3, [r1*3]
%assign %%i 0
%rep 16/mmsize
    mova          m0, [r2+mmsize*0+%%i]
    mova          m1, [r2+mmsize*2+%%i]
    mova          m2, [r2+mmsize*4+%%i]
    mova          m3, [r2+mmsize*6+%%i]
%if mmsize == 8
    packsswb      m0, [r2+mmsize*8+%%i]
    packsswb      m1, [r2+mmsize*10+%%i]
    packsswb      m2, [r2+mmsize*12+%%i]
    packsswb      m3, [r2+mmsize*14+%%i]
%else
    packsswb      m0, [r2+mmsize*1+%%i]
    packsswb      m1, [r2+mmsize*3+%%i]
    packsswb      m2, [r2+mmsize*5+%%i]
    packsswb      m3, [r2+mmsize*7+%%i]
%endif
    paddb         m0, m4
    paddb         m1, m4
    paddb         m2, m4
    paddb         m3, m4
    movq   [r0     ], m0
%if mmsize == 8
    movq   [r0+r1  ], m1
    movq   [r0+r1*2], m2
    movq   [r0+r3  ], m3
%else
    movhps [r0+r1  ], m0
    movq   [r0+r1*2], m1
    movhps [r0+r3  ], m1
%endif
%if %%i == 0
    lea           r0, [r0+r1*4]
%endif
%if mmsize == 16
    movq   [r0     ], m2
    movhps [r0+r1  ], m2
    movq   [r0+r1*2], m3
    movhps [r0+r3  ], m3
%endif
%assign %%i %%i+8
%endrep
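    ; also leave the coefficient block zeroed for the next block
    ; (vp3_idct_dc_add below likewise clears the DC value it consumes)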
    pxor          m0, m0
%assign %%offset 0
%rep 128/mmsize
    mova [r2+%%offset], m0
%assign %%offset %%offset+mmsize
%endrep
    RET

cglobal vp3_idct_add, 3, 4, 9
    VP3_IDCT      r2

    movsxdifnidn  r1, r1d
    lea           r3, [r1*3]
    pxor          m4, m4
%if mmsize == 16
%assign %%i 0
%rep 2
    movq          m0, [r0]
    movq          m1, [r0+r1]
    movq          m2, [r0+r1*2]
    movq          m3, [r0+r3]
    punpcklbw     m0, m4
    punpcklbw     m1, m4
    punpcklbw     m2, m4
    punpcklbw     m3, m4
    paddsw        m0, [r2+ 0+%%i]
    paddsw        m1, [r2+16+%%i]
    paddsw        m2, [r2+32+%%i]
    paddsw        m3, [r2+48+%%i]
    packuswb      m0, m1
    packuswb      m2, m3
    movq   [r0     ], m0
    movhps [r0+r1  ], m0
    movq   [r0+r1*2], m2
    movhps [r0+r3  ], m2
%if %%i == 0
    lea           r0, [r0+r1*4]
%endif
%assign %%i %%i+64
%endrep
%else
%assign %%i 0
%rep 2
    movq          m0, [r0]
    movq          m1, [r0+r1]
    movq          m2, [r0+r1*2]
    movq          m3, [r0+r3]
    movq          m5, m0
    movq          m6, m1
    movq          m7, m2
    punpcklbw     m0, m4
    punpcklbw     m1, m4
    punpcklbw     m2, m4
    punpckhbw     m5, m4
    punpckhbw     m6, m4
    punpckhbw     m7, m4
    paddsw        m0, [r2+ 0+%%i]
    paddsw        m1, [r2+16+%%i]
    paddsw        m2, [r2+32+%%i]
    paddsw        m5, [r2+64+%%i]
    paddsw        m6, [r2+80+%%i]
    paddsw        m7, [r2+96+%%i]
    packuswb      m0, m5
    movq          m5, m3
    punpcklbw     m3, m4
    punpckhbw     m5, m4
    packuswb      m1, m6
    paddsw        m3, [r2+48+%%i]
    paddsw        m5, [r2+112+%%i]
    packuswb      m2, m7
    packuswb      m3, m5
    movq   [r0     ], m0
    movq   [r0+r1  ], m1
    movq   [r0+r1*2], m2
    movq   [r0+r3  ], m3
%if %%i == 0
    lea           r0, [r0+r1*4]
%endif
%assign %%i %%i+8
%endrep
%endif
%assign %%i 0
%rep 128/mmsize
    mova    [r2+%%i], m4
%assign %%i %%i+mmsize
%endrep
    RET
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
vp3_idct_funcs
%endif

INIT_XMM sse2
vp3_idct_funcs

%macro DC_ADD 0
    movq          m2, [r0     ]
    movq          m3, [r0+r1  ]
    paddusb       m2, m0
    movq          m4, [r0+r1*2]
    paddusb       m3, m0
    movq          m5, [r0+r2  ]
    paddusb       m4, m0
    paddusb       m5, m0
    psubusb       m2, m1
    psubusb       m3, m1
    movq   [r0     ], m2
    psubusb       m4, m1
    movq   [r0+r1  ], m3
    psubusb       m5, m1
    movq   [r0+r1*2], m4
    movq   [r0+r2  ], m5
%endmacro
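
; vp3_idct_dc_add adds the rounded DC term, dc = (block[0] + 15) >> 5, to an
; 8x8 block of pixels.  Signed addition with unsigned-saturating ops works
; because after packuswb m0 holds max(dc, 0) and m1 holds max(-dc, 0), one of
; which is always zero, so DC_ADD's paddusb m0 / psubusb m1 pair clips the
; result to [0,255] in either direction.  Note r2 is reused as 3*stride once
; the DC coefficient has been read and cleared.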
INIT_MMX mmxext
cglobal vp3_idct_dc_add, 3, 4
%if ARCH_X86_64
    movsxd        r1, r1d
%endif
    movsx         r3, word [r2]
    mov    word [r2], 0
    lea           r2, [r1*3]
    add           r3, 15
    sar           r3, 5
    movd          m0, r3d
    pshufw        m0, m0, 0x0
    pxor          m1, m1
    psubw         m1, m0
    packuswb      m0, m0
    packuswb      m1, m1
    DC_ADD
    lea           r0, [r0+r1*4]
    DC_ADD
    RET