;******************************************************************************
;* MMX/SSE2-optimized functions for the VP3 decoder
;* Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

; MMX-optimized functions cribbed from the original VP3 source code.

SECTION_RODATA
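
; vp3_idct_data holds the seven IDCT cosine constants C(1)..C(7), eight copies
; of each: C(n) = round(2^16 * cos(n*pi/16)), e.g. 64277 ~= 65536*cos(pi/16).
; For n <= 5 the constant is >= 0x8000, so pmulhw by C(n) yields c*x - x and
; the code adds x back afterwards (hence the "cN*iM - iM" comments below).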
vp3_idct_data: times 8 dw 64277
               times 8 dw 60547
               times 8 dw 54491
               times 8 dw 46341
               times 8 dw 36410
               times 8 dw 25080
               times 8 dw 12785

cextern pb_1
cextern pb_3
cextern pb_7
cextern pb_1F
cextern pb_81
cextern pw_8

cextern put_signed_pixels_clamped_mmx
cextern add_pixels_clamped_mmx

SECTION .text

; this is off by one or two for some cases when filter_limit is greater than 63
; in: p0 in mm6, p1 in mm4, p2 in mm2, p3 in mm1
; out: p1 in mm4, p2 in mm3
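;
; Reference form of the filter (per the Theora spec, given here only as a
; rough guide to the pavgb arithmetic below):
;   d = (p0 - 3*p1 + 3*p2 - p3 + 4) >> 3
; d is then bounded using filter_limit (the "flim" value loaded from [r2+516]),
; and finally p1 += d, p2 -= d, with unsigned saturation.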
%macro VP3_LOOP_FILTER 0
    movq      m7, m6
    pand      m6, [pb_7] ; p0&7
    psrlw     m7, 3
    pand      m7, [pb_1F] ; p0>>3
    movq      m3, m2 ; p2
    pxor      m2, m4
    pand      m2, [pb_1] ; (p2^p1)&1
    movq      m5, m2
    paddb     m2, m2
    paddb     m2, m5 ; 3*(p2^p1)&1
    paddb     m2, m6 ; extra bits lost in shifts
    pcmpeqb   m0, m0
    pxor      m1, m0 ; 255 - p3
    pavgb     m1, m2 ; (256 - p3 + extrabits) >> 1
    pxor      m0, m4 ; 255 - p1
    pavgb     m0, m3 ; (256 + p2-p1) >> 1
    paddb     m1, [pb_3]
    pavgb     m1, m0 ; 128+2+( p2-p1 - p3) >> 2
    pavgb     m1, m0 ; 128+1+(3*(p2-p1) - p3) >> 3
    paddusb   m7, m1 ; d+128+1
    movq      m6, [pb_81]
    psubusb   m6, m7
    psubusb   m7, [pb_81]
    movq      m5, [r2+516] ; flim
    pminub    m6, m5
    pminub    m7, m5
    movq      m0, m6
    movq      m1, m7
    paddb     m6, m6
    paddb     m7, m7
    pminub    m6, m5
    pminub    m7, m5
    psubb     m6, m0
    psubb     m7, m1
    paddusb   m4, m7
    psubusb   m4, m6
    psubusb   m3, m7
    paddusb   m3, m6
%endmacro
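
; STORE_4_WORDS writes the four 16-bit lanes of %1 to four consecutive rows,
; one word at column -1 of each row; the horizontal filter uses it to store
; the p1/p2 byte pairs produced by the SBUTTERFLY interleave. Clobbers r2.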
%macro STORE_4_WORDS 1
    movd      r2d, %1
    mov       [r0     -1], r2w
    psrlq     %1, 32
    shr       r2, 16
    mov       [r0+r1  -1], r2w
    movd      r2d, %1
    mov       [r0+r1*2-1], r2w
    shr       r2, 16
    mov       [r0+r3  -1], r2w
%endmacro

INIT_MMX
cglobal vp3_v_loop_filter_mmx2, 3, 4
%if ARCH_X86_64
    movsxd    r1, r1d
%endif
    mov       r3, r1
    neg       r1
    movq      m6, [r0+r1*2]
    movq      m4, [r0+r1  ]
    movq      m2, [r0     ]
    movq      m1, [r0+r3  ]

    VP3_LOOP_FILTER

    movq      [r0+r1], m4
    movq      [r0   ], m3
    RET

cglobal vp3_h_loop_filter_mmx2, 3, 4
%if ARCH_X86_64
    movsxd    r1, r1d
%endif
    lea       r3, [r1*3]

    movd      m6, [r0     -2]
    movd      m4, [r0+r1  -2]
    movd      m2, [r0+r1*2-2]
    movd      m1, [r0+r3  -2]
    lea       r0, [r0+r1*4  ]
    punpcklbw m6, [r0     -2]
    punpcklbw m4, [r0+r1  -2]
    punpcklbw m2, [r0+r1*2-2]
    punpcklbw m1, [r0+r3  -2]
    sub       r0, r3
    sub       r0, r1

    TRANSPOSE4x4B 6, 4, 2, 1, 0
    VP3_LOOP_FILTER
    SBUTTERFLY bw, 4, 3, 5

    STORE_4_WORDS m4
    lea       r0, [r0+r1*4  ]
    STORE_4_WORDS m3
    RET

; from original comments: The Macro does IDct on 4 1-D Dcts
%macro BeginIDCT 0
    movq      m2, I(3)
    movq      m6, C(3)
    movq      m4, m2
    movq      m7, J(5)
    pmulhw    m4, m6 ; r4 = c3*i3 - i3
    movq      m1, C(5)
    pmulhw    m6, m7 ; r6 = c3*i5 - i5
    movq      m5, m1
    pmulhw    m1, m2 ; r1 = c5*i3 - i3
    movq      m3, I(1)
    pmulhw    m5, m7 ; r5 = c5*i5 - i5
    movq      m0, C(1)
    paddw     m4, m2 ; r4 = c3*i3
    paddw     m6, m7 ; r6 = c3*i5
    paddw     m2, m1 ; r2 = c5*i3
    movq      m1, J(7)
    paddw     m7, m5 ; r7 = c5*i5
    movq      m5, m0 ; r5 = c1
    pmulhw    m0, m3 ; r0 = c1*i1 - i1
    paddsw    m4, m7 ; r4 = C = c3*i3 + c5*i5
    pmulhw    m5, m1 ; r5 = c1*i7 - i7
    movq      m7, C(7)
    psubsw    m6, m2 ; r6 = D = c3*i5 - c5*i3
    paddw     m0, m3 ; r0 = c1*i1
    pmulhw    m3, m7 ; r3 = c7*i1
    movq      m2, I(2)
    pmulhw    m7, m1 ; r7 = c7*i7
    paddw     m5, m1 ; r5 = c1*i7
    movq      m1, m2 ; r1 = i2
    pmulhw    m2, C(2) ; r2 = c2*i2 - i2
    psubsw    m3, m5 ; r3 = B = c7*i1 - c1*i7
    movq      m5, J(6)
    paddsw    m0, m7 ; r0 = A = c1*i1 + c7*i7
    movq      m7, m5 ; r7 = i6
    psubsw    m0, m4 ; r0 = A - C
    pmulhw    m5, C(2) ; r5 = c2*i6 - i6
    paddw     m2, m1 ; r2 = c2*i2
    pmulhw    m1, C(6) ; r1 = c6*i2
    paddsw    m4, m4 ; r4 = C + C
    paddsw    m4, m0 ; r4 = C. = A + C
    psubsw    m3, m6 ; r3 = B - D
    paddw     m5, m7 ; r5 = c2*i6
    paddsw    m6, m6 ; r6 = D + D
    pmulhw    m7, C(6) ; r7 = c6*i6
    paddsw    m6, m3 ; r6 = D. = B + D
    movq      I(1), m4 ; save C. at I(1)
    psubsw    m1, m5 ; r1 = H = c6*i2 - c2*i6
    movq      m4, C(4)
    movq      m5, m3 ; r5 = B - D
    pmulhw    m3, m4 ; r3 = (c4 - 1) * (B - D)
    paddsw    m7, m2 ; r7 = G = c2*i2 + c6*i6
    movq      I(2), m6 ; save D. at I(2)
    movq      m2, m0 ; r2 = A - C
    movq      m6, I(0)
    pmulhw    m0, m4 ; r0 = (c4 - 1) * (A - C)
    paddw     m5, m3 ; r5 = B. = c4 * (B - D)
    movq      m3, J(4)
    psubsw    m5, m1 ; r5 = B.. = B. - H
    paddw     m2, m0 ; r2 = A. = c4 * (A - C)
    psubsw    m6, m3 ; r6 = i0 - i4
    movq      m0, m6
    pmulhw    m6, m4 ; r6 = (c4 - 1) * (i0 - i4)
    paddsw    m3, m3 ; r3 = i4 + i4
    paddsw    m1, m1 ; r1 = H + H
    paddsw    m3, m0 ; r3 = i0 + i4
    paddsw    m1, m5 ; r1 = H. = B + H
    pmulhw    m4, m3 ; r4 = (c4 - 1) * (i0 + i4)
    paddsw    m6, m0 ; r6 = F = c4 * (i0 - i4)
    psubsw    m6, m2 ; r6 = F. = F - A.
    paddsw    m2, m2 ; r2 = A. + A.
    movq      m0, I(1) ; r0 = C.
    paddsw    m2, m6 ; r2 = A.. = F + A.
    paddw     m4, m3 ; r4 = E = c4 * (i0 + i4)
    psubsw    m2, m1 ; r2 = R2 = A.. - H.
%endmacro
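
; At this point BeginIDCT has left m0 = C., m1 = H., m2 = R2 = A.. - H.,
; m4 = E, m5 = B.., m6 = F. and m7 = G, with C. saved at I(1) and D. at I(2);
; RowIDCT and ColumnIDCT below combine these into the remaining outputs.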

; RowIDCT gets ready to transpose
%macro RowIDCT 0
    BeginIDCT

    movq      m3, I(2) ; r3 = D.
    psubsw    m4, m7 ; r4 = E. = E - G
    paddsw    m1, m1 ; r1 = H. + H.
    paddsw    m7, m7 ; r7 = G + G
    paddsw    m1, m2 ; r1 = R1 = A.. + H.
    paddsw    m7, m4 ; r7 = G. = E + G
    psubsw    m4, m3 ; r4 = R4 = E. - D.
    paddsw    m3, m3
    psubsw    m6, m5 ; r6 = R6 = F. - B..
    paddsw    m5, m5
    paddsw    m3, m4 ; r3 = R3 = E. + D.
    paddsw    m5, m6 ; r5 = R5 = F. + B..
    psubsw    m7, m0 ; r7 = R7 = G. - C.
    paddsw    m0, m0
    movq      I(1), m1 ; save R1
    paddsw    m0, m7 ; r0 = R0 = G. + C.
%endmacro

; Column IDCT normalizes and stores final results
%macro ColumnIDCT 0
    BeginIDCT

    paddsw    m2, OC_8 ; adjust R2 (and R1) for shift
    paddsw    m1, m1 ; r1 = H. + H.
    paddsw    m1, m2 ; r1 = R1 = A.. + H.
    psraw     m2, 4 ; r2 = NR2
    psubsw    m4, m7 ; r4 = E. = E - G
    psraw     m1, 4 ; r1 = NR1
    movq      m3, I(2) ; r3 = D.
    paddsw    m7, m7 ; r7 = G + G
    movq      I(2), m2 ; store NR2 at I2
    paddsw    m7, m4 ; r7 = G. = E + G
    movq      I(1), m1 ; store NR1 at I1
    psubsw    m4, m3 ; r4 = R4 = E. - D.
    paddsw    m4, OC_8 ; adjust R4 (and R3) for shift
    paddsw    m3, m3 ; r3 = D. + D.
    paddsw    m3, m4 ; r3 = R3 = E. + D.
    psraw     m4, 4 ; r4 = NR4
    psubsw    m6, m5 ; r6 = R6 = F. - B..
    psraw     m3, 4 ; r3 = NR3
    paddsw    m6, OC_8 ; adjust R6 (and R5) for shift
    paddsw    m5, m5 ; r5 = B.. + B..
    paddsw    m5, m6 ; r5 = R5 = F. + B..
    psraw     m6, 4 ; r6 = NR6
    movq      J(4), m4 ; store NR4 at J4
    psraw     m5, 4 ; r5 = NR5
    movq      I(3), m3 ; store NR3 at I3
    psubsw    m7, m0 ; r7 = R7 = G. - C.
    paddsw    m7, OC_8 ; adjust R7 (and R0) for shift
    paddsw    m0, m0 ; r0 = C. + C.
    paddsw    m0, m7 ; r0 = R0 = G. + C.
    psraw     m7, 4 ; r7 = NR7
    movq      J(6), m6 ; store NR6 at J6
    psraw     m0, 4 ; r0 = NR0
    movq      J(5), m5 ; store NR5 at J5
    movq      J(7), m7 ; store NR7 at J7
    movq      I(0), m0 ; store NR0 at I0
%endmacro

; Following macro does two 4x4 transposes in place.
;
; At entry (we assume):
;
; r0 = a3 a2 a1 a0
; I(1) = b3 b2 b1 b0
; r2 = c3 c2 c1 c0
; r3 = d3 d2 d1 d0
;
; r4 = e3 e2 e1 e0
; r5 = f3 f2 f1 f0
; r6 = g3 g2 g1 g0
; r7 = h3 h2 h1 h0
;
; At exit, we have:
;
; I(0) = d0 c0 b0 a0
; I(1) = d1 c1 b1 a1
; I(2) = d2 c2 b2 a2
; I(3) = d3 c3 b3 a3
;
; J(4) = h0 g0 f0 e0
; J(5) = h1 g1 f1 e1
; J(6) = h2 g2 f2 e2
; J(7) = h3 g3 f3 e3
;
; I(0) I(1) I(2) I(3) is the transpose of r0 I(1) r2 r3.
; J(4) J(5) J(6) J(7) is the transpose of r4 r5 r6 r7.
;
; Since r1 is free at entry, we calculate the Js first.
%macro Transpose 0
    movq      m1, m4 ; r1 = e3 e2 e1 e0
    punpcklwd m4, m5 ; r4 = f1 e1 f0 e0
    movq      I(0), m0 ; save a3 a2 a1 a0
    punpckhwd m1, m5 ; r1 = f3 e3 f2 e2
    movq      m0, m6 ; r0 = g3 g2 g1 g0
    punpcklwd m6, m7 ; r6 = h1 g1 h0 g0
    movq      m5, m4 ; r5 = f1 e1 f0 e0
    punpckldq m4, m6 ; r4 = h0 g0 f0 e0 = R4
    punpckhdq m5, m6 ; r5 = h1 g1 f1 e1 = R5
    movq      m6, m1 ; r6 = f3 e3 f2 e2
    movq      J(4), m4
    punpckhwd m0, m7 ; r0 = h3 g3 h2 g2
    movq      J(5), m5
    punpckhdq m6, m0 ; r6 = h3 g3 f3 e3 = R7
    movq      m4, I(0) ; r4 = a3 a2 a1 a0
    punpckldq m1, m0 ; r1 = h2 g2 f2 e2 = R6
    movq      m5, I(1) ; r5 = b3 b2 b1 b0
    movq      m0, m4 ; r0 = a3 a2 a1 a0
    movq      J(7), m6
    punpcklwd m0, m5 ; r0 = b1 a1 b0 a0
    movq      J(6), m1
    punpckhwd m4, m5 ; r4 = b3 a3 b2 a2
    movq      m5, m2 ; r5 = c3 c2 c1 c0
    punpcklwd m2, m3 ; r2 = d1 c1 d0 c0
    movq      m1, m0 ; r1 = b1 a1 b0 a0
    punpckldq m0, m2 ; r0 = d0 c0 b0 a0 = R0
    punpckhdq m1, m2 ; r1 = d1 c1 b1 a1 = R1
    movq      m2, m4 ; r2 = b3 a3 b2 a2
    movq      I(0), m0
    punpckhwd m5, m3 ; r5 = d3 c3 d2 c2
    movq      I(1), m1
    punpckhdq m4, m5 ; r4 = d3 c3 b3 a3 = R3
    punpckldq m2, m5 ; r2 = d2 c2 b2 a2 = R2
    movq      I(3), m4
    movq      I(2), m2
%endmacro
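
; The MMX IDCT handles the (partially transposed) 8x8 block in four passes:
; RowIDCT + Transpose on the top four rows, RowIDCT + Transpose on the bottom
; four rows, then ColumnIDCT on the left four and the right four columns, with
; the I()/J() defines below selecting the quadrants for each pass. Every pass
; transforms four 1-D vectors at once in the four 16-bit MMX lanes.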
%macro VP3_IDCT_mmx 1
    ; eax = quantized input
    ; ebx = dequantizer matrix
    ; ecx = IDCT constants
    ; M(I) = ecx + MaskOffset(0) + I * 8
    ; C(I) = ecx + CosineOffset(32) + (I-1) * 8
    ; edx = output
    ; r0..r7 = mm0..mm7

%define OC_8 [pw_8]
%define C(x) [vp3_idct_data+16*(x-1)]

    ; at this point, function has completed dequantization + dezigzag +
    ; partial transposition; now do the idct itself
%define I(x) [%1+16* x ]
%define J(x) [%1+16*(x-4)+8]
    RowIDCT
    Transpose

%define I(x) [%1+16* x +64]
%define J(x) [%1+16*(x-4)+72]
    RowIDCT
    Transpose

%define I(x) [%1+16*x]
%define J(x) [%1+16*x]
    ColumnIDCT

%define I(x) [%1+16*x+8]
%define J(x) [%1+16*x+8]
    ColumnIDCT
%endmacro

%macro VP3_1D_IDCT_SSE2 0
    movdqa    m2, I(3) ; xmm2 = i3
    movdqa    m6, C(3) ; xmm6 = c3
    movdqa    m4, m2 ; xmm4 = i3
    movdqa    m7, I(5) ; xmm7 = i5
    pmulhw    m4, m6 ; xmm4 = c3 * i3 - i3
    movdqa    m1, C(5) ; xmm1 = c5
    pmulhw    m6, m7 ; xmm6 = c3 * i5 - i5
    movdqa    m5, m1 ; xmm5 = c5
    pmulhw    m1, m2 ; xmm1 = c5 * i3 - i3
    movdqa    m3, I(1) ; xmm3 = i1
    pmulhw    m5, m7 ; xmm5 = c5 * i5 - i5
    movdqa    m0, C(1) ; xmm0 = c1
    paddw     m4, m2 ; xmm4 = c3 * i3
    paddw     m6, m7 ; xmm6 = c3 * i5
    paddw     m2, m1 ; xmm2 = c5 * i3
    movdqa    m1, I(7) ; xmm1 = i7
    paddw     m7, m5 ; xmm7 = c5 * i5
    movdqa    m5, m0 ; xmm5 = c1
    pmulhw    m0, m3 ; xmm0 = c1 * i1 - i1
    paddsw    m4, m7 ; xmm4 = c3 * i3 + c5 * i5 = C
    pmulhw    m5, m1 ; xmm5 = c1 * i7 - i7
    movdqa    m7, C(7) ; xmm7 = c7
    psubsw    m6, m2 ; xmm6 = c3 * i5 - c5 * i3 = D
    paddw     m0, m3 ; xmm0 = c1 * i1
    pmulhw    m3, m7 ; xmm3 = c7 * i1
    movdqa    m2, I(2) ; xmm2 = i2
    pmulhw    m7, m1 ; xmm7 = c7 * i7
    paddw     m5, m1 ; xmm5 = c1 * i7
    movdqa    m1, m2 ; xmm1 = i2
    pmulhw    m2, C(2) ; xmm2 = i2 * c2 - i2
    psubsw    m3, m5 ; xmm3 = c7 * i1 - c1 * i7 = B
    movdqa    m5, I(6) ; xmm5 = i6
    paddsw    m0, m7 ; xmm0 = c1 * i1 + c7 * i7 = A
    movdqa    m7, m5 ; xmm7 = i6
    psubsw    m0, m4 ; xmm0 = A - C
    pmulhw    m5, C(2) ; xmm5 = c2 * i6 - i6
    paddw     m2, m1 ; xmm2 = i2 * c2
    pmulhw    m1, C(6) ; xmm1 = c6 * i2
    paddsw    m4, m4 ; xmm4 = C + C
    paddsw    m4, m0 ; xmm4 = A + C = C.
    psubsw    m3, m6 ; xmm3 = B - D
    paddw     m5, m7 ; xmm5 = c2 * i6
    paddsw    m6, m6 ; xmm6 = D + D
    pmulhw    m7, C(6) ; xmm7 = c6 * i6
    paddsw    m6, m3 ; xmm6 = B + D = D.
    movdqa    I(1), m4 ; Save C. at I(1)
    psubsw    m1, m5 ; xmm1 = c6 * i2 - c2 * i6 = H
    movdqa    m4, C(4) ; xmm4 = C4
    movdqa    m5, m3 ; xmm5 = B - D
    pmulhw    m3, m4 ; xmm3 = ( c4 -1 ) * ( B - D )
    paddsw    m7, m2 ; xmm7 = c2 * i2 + c6 * i6 = G
    movdqa    I(2), m6 ; save D. at I(2)
    movdqa    m2, m0 ; xmm2 = A - C
    movdqa    m6, I(0) ; xmm6 = i0
    pmulhw    m0, m4 ; xmm0 = ( c4 - 1 ) * ( A - C ) = A.
    paddw     m5, m3 ; xmm5 = c4 * ( B - D ) = B.
    movdqa    m3, I(4) ; xmm3 = i4
    psubsw    m5, m1 ; xmm5 = B. - H = B..
    paddw     m2, m0 ; xmm2 = c4 * ( A - C) = A.
    psubsw    m6, m3 ; xmm6 = i0 - i4
    movdqa    m0, m6 ; xmm0 = i0 - i4
    pmulhw    m6, m4 ; xmm6 = (c4 - 1) * (i0 - i4) = F
    paddsw    m3, m3 ; xmm3 = i4 + i4
    paddsw    m1, m1 ; xmm1 = H + H
    paddsw    m3, m0 ; xmm3 = i0 + i4
    paddsw    m1, m5 ; xmm1 = B. + H = H.
    pmulhw    m4, m3 ; xmm4 = ( c4 - 1 ) * ( i0 + i4 )
    paddw     m6, m0 ; xmm6 = c4 * ( i0 - i4 )
    psubsw    m6, m2 ; xmm6 = F - A. = F.
    paddsw    m2, m2 ; xmm2 = A. + A.
    movdqa    m0, I(1) ; Load C. from I(1)
    paddsw    m2, m6 ; xmm2 = F + A. = A..
    paddw     m4, m3 ; xmm4 = c4 * ( i0 + i4 ) = E
    psubsw    m2, m1 ; xmm2 = A.. - H. = R2
    ADD(m2) ; Adjust R2 and R1 before shifting
    paddsw    m1, m1 ; xmm1 = H. + H.
    paddsw    m1, m2 ; xmm1 = A.. + H. = R1
    SHIFT(m2) ; xmm2 = op2
    psubsw    m4, m7 ; xmm4 = E - G = E.
    SHIFT(m1) ; xmm1 = op1
    movdqa    m3, I(2) ; Load D. from I(2)
    paddsw    m7, m7 ; xmm7 = G + G
    paddsw    m7, m4 ; xmm7 = E + G = G.
    psubsw    m4, m3 ; xmm4 = E. - D. = R4
    ADD(m4) ; Adjust R4 and R3 before shifting
    paddsw    m3, m3 ; xmm3 = D. + D.
    paddsw    m3, m4 ; xmm3 = E. + D. = R3
    SHIFT(m4) ; xmm4 = op4
    psubsw    m6, m5 ; xmm6 = F. - B.. = R6
    SHIFT(m3) ; xmm3 = op3
    ADD(m6) ; Adjust R6 and R5 before shifting
    paddsw    m5, m5 ; xmm5 = B.. + B..
    paddsw    m5, m6 ; xmm5 = F. + B.. = R5
    SHIFT(m6) ; xmm6 = op6
    SHIFT(m5) ; xmm5 = op5
    psubsw    m7, m0 ; xmm7 = G. - C. = R7
    ADD(m7) ; Adjust R7 and R0 before shifting
    paddsw    m0, m0 ; xmm0 = C. + C.
    paddsw    m0, m7 ; xmm0 = G. + C.
    SHIFT(m7) ; xmm7 = op7
    SHIFT(m0) ; xmm0 = op0
%endmacro

%macro PUT_BLOCK 8
    movdqa    O(0), m%1
    movdqa    O(1), m%2
    movdqa    O(2), m%3
    movdqa    O(3), m%4
    movdqa    O(4), m%5
    movdqa    O(5), m%6
    movdqa    O(6), m%7
    movdqa    O(7), m%8
%endmacro
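
; The SSE2 IDCT does each 1-D pass over the whole block at once (the eight
; 16-bit lanes cover all eight columns): a first VP3_1D_IDCT_SSE2 pass with
; ADD()/SHIFT() defined as no-ops, a full 8x8 word transpose, then a second
; pass with the +8 rounding and >>4 shift folded in through ADD()/SHIFT().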
%macro VP3_IDCT_sse2 1
%define I(x) [%1+16*x]
%define O(x) [%1+16*x]
%define C(x) [vp3_idct_data+16*(x-1)]
%define SHIFT(x)
%define ADD(x)
    VP3_1D_IDCT_SSE2
%if ARCH_X86_64
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%1], [%1+16]
%endif
    PUT_BLOCK 0, 1, 2, 3, 4, 5, 6, 7

%define SHIFT(x) psraw x, 4
%define ADD(x) paddsw x, [pw_8]
    VP3_1D_IDCT_SSE2
    PUT_BLOCK 0, 1, 2, 3, 4, 5, 6, 7
%endmacro
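
; vp3_idct_funcs generates vp3_idct_<opt>, vp3_idct_put_<opt> and
; vp3_idct_add_<opt>. The put/add variants run the IDCT in place on the
; coefficient block, shuffle the arguments into the order expected by the
; shared clamped store/add helpers, and tail-jump to them (WIN64 builds use
; call + RET instead).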
%macro vp3_idct_funcs 3
cglobal vp3_idct_%1, 1, 1, %2
    VP3_IDCT_%1 r0
    RET

cglobal vp3_idct_put_%1, 3, %3, %2
    VP3_IDCT_%1 r2
%if ARCH_X86_64
    mov       r3, r2
    mov       r2, r1
    mov       r1, r0
    mov       r0, r3
%else
    mov       r0m, r2
    mov       r1m, r0
    mov       r2m, r1
%endif
%if WIN64
    call      put_signed_pixels_clamped_mmx
    RET
%else
    jmp       put_signed_pixels_clamped_mmx
%endif

cglobal vp3_idct_add_%1, 3, %3, %2
    VP3_IDCT_%1 r2
%if ARCH_X86_64
    mov       r3, r2
    mov       r2, r1
    mov       r1, r0
    mov       r0, r3
%else
    mov       r0m, r2
    mov       r1m, r0
    mov       r2m, r1
%endif
%if WIN64
    call      add_pixels_clamped_mmx
    RET
%else
    jmp       add_pixels_clamped_mmx
%endif
%endmacro

%if ARCH_X86_64
%define REGS 4
%else
%define REGS 3
%endif

INIT_MMX
vp3_idct_funcs mmx,  0, REGS
INIT_XMM
vp3_idct_funcs sse2, 9, REGS
%undef REGS
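
; vp3_idct_dc_add_mmx2 computes dc = (block[0] + 15) >> 5 and splats it into a
; register pair: after the two packuswb, every byte of m0 is dc clamped to
; [0, 255] and every byte of m1 is -dc clamped the same way, so DC_ADD can
; apply the signed DC offset with unsigned saturating adds/subtracts, four
; rows per invocation.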
%macro DC_ADD 0
    movq      m2, [r0     ]
    movq      m3, [r0+r1  ]
    paddusb   m2, m0
    movq      m4, [r0+r1*2]
    paddusb   m3, m0
    movq      m5, [r0+r3  ]
    paddusb   m4, m0
    paddusb   m5, m0
    psubusb   m2, m1
    psubusb   m3, m1
    movq      [r0     ], m2
    psubusb   m4, m1
    movq      [r0+r1  ], m3
    psubusb   m5, m1
    movq      [r0+r1*2], m4
    movq      [r0+r3  ], m5
%endmacro

INIT_MMX
cglobal vp3_idct_dc_add_mmx2, 3, 4
%if ARCH_X86_64
    movsxd    r1, r1d
%endif
    lea       r3, [r1*3]
    movsx     r2, word [r2]
    add       r2, 15
    sar       r2, 5
    movd      m0, r2d
    pshufw    m0, m0, 0x0
    pxor      m1, m1
    psubw     m1, m0
    packuswb  m0, m0
    packuswb  m1, m1

    DC_ADD
    lea       r0, [r0+r1*4]
    DC_ADD

    RET