/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/***********************************/
/* IDCT */

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t"\
    "psraw $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )
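
/* For reference, the scalar form of this 1-D transform (cf. the C
 * fallback ff_h264_idct_add_c; variable names here are ours):
 *
 *     z0 = b0 + b2;           z1 = b0 - b2;
 *     z2 = (b1 >> 1) - b3;    z3 = b1 + (b3 >> 1);
 *     o0 = z0 + z3;  o1 = z1 + z2;  o2 = z1 - z2;  o3 = z0 - z3;
 *
 * SUMSUB_BA is the two-register add/sub butterfly without a temporary:
 * a += b; b += b; b -= a  ==>  a' = a+b, b' = b-a. */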

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
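
/* STORE_DIFF_4P below finishes one row of the idct: shift the residual
 * down by 6 (the transform's scaling), add it to four destination pixels
 * widened to 16 bits, and pack back to bytes with unsigned saturation. */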
#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"

void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )
        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )
        "paddw %%mm6, %%mm3 \n\t"
        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )
        "pxor %%mm7, %%mm7 \n\t"
        :: "m"(ff_pw_32));

    asm volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}
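
/* The dc-only cases below splat the rounded DC value, (block[0]+32)>>6,
 * across a register and apply it with an unsigned-saturation trick: the
 * positive part is added with paddusb and the negated part subtracted
 * with psubusb, which together act as a signed saturated add per byte. */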

void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}

void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        asm volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}

/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    /* a = q0^p0^((p1-q1)>>2) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubb %%mm3, %%mm4 \n\t"\
    "psrlw $2, %%mm4 \n\t"\
    "pxor %%mm1, %%mm4 \n\t"\
    "pxor %%mm2, %%mm4 \n\t"\
    /* b = p0^(q1>>2) */\
    "psrlw $2, %%mm3 \n\t"\
    "pand "#pb_3f", %%mm3 \n\t"\
    "movq %%mm1, %%mm5 \n\t"\
    "pxor %%mm3, %%mm5 \n\t"\
    /* c = q0^(p1>>2) */\
    "psrlw $2, %%mm0 \n\t"\
    "pand "#pb_3f", %%mm0 \n\t"\
    "movq %%mm2, %%mm6 \n\t"\
    "pxor %%mm0, %%mm6 \n\t"\
    /* d = (c^b) & ~(b^a) & 1 */\
    "pxor %%mm5, %%mm6 \n\t"\
    "pxor %%mm4, %%mm5 \n\t"\
    "pandn %%mm6, %%mm5 \n\t"\
    "pand "#pb_01", %%mm5 \n\t"\
    /* delta = (avg(q0, p1>>2) + (d&a))
     *       - (avg(p0, q1>>2) + (d&~a)) */\
    "pavgb %%mm2, %%mm0 \n\t"\
    "pand %%mm5, %%mm4 \n\t"\
    "paddusb %%mm4, %%mm0 \n\t"\
    "pavgb %%mm1, %%mm3 \n\t"\
    "pxor %%mm5, %%mm4 \n\t"\
    "paddusb %%mm4, %%mm3 \n\t"\
    /* p0 += clip(delta, -tc0, tc0)
     * q0 -= clip(delta, -tc0, tc0) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubusb %%mm3, %%mm0 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "pminub %%mm7, %%mm0 \n\t"\
    "pminub %%mm7, %%mm3 \n\t"\
    "paddusb %%mm0, %%mm1 \n\t"\
    "paddusb %%mm3, %%mm2 \n\t"\
    "psubusb %%mm3, %%mm1 \n\t"\
    "psubusb %%mm0, %%mm2 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"

static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    uint64_t tmp0;
    uint64_t tc = (uint8_t)tc0[1]*0x01010000 | (uint8_t)tc0[0]*0x0101;
    // with luma, tc0=0 doesn't mean no filtering, so we need a separate input mask
    uint32_t mask[2] = { (tc0[0]>=0)*0xffffffff, (tc0[1]>=0)*0xffffffff };

    asm volatile(
        "movq (%1,%3), %%mm0 \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2 \n\t" //q0
        "movq (%2,%3), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)
        "pand %5, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"
        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pandn %%mm7, %%mm6 \n\t"
        "pcmpeqb %%mm7, %%mm6 \n\t"
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pshufw $80, %4, %%mm4 \n\t"
        "pand %%mm7, %%mm4 \n\t" // mask & tc0
        "movq %8, %%mm7 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mask & |p2-p0|<beta & 1
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        "paddb %%mm4, %%mm7 \n\t" // tc++
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)
        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pandn %0, %%mm6 \n\t"
        "pcmpeqb %0, %%mm6 \n\t"
        "pand %0, %%mm6 \n\t"
        "pshufw $80, %4, %%mm5 \n\t"
        "pand %%mm6, %%mm5 \n\t"
        "pand %8, %%mm6 \n\t"
        "paddb %%mm6, %%mm7 \n\t"
        "movq (%2,%3), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)
        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, %9)
        "movq %%mm1, (%1,%3,2) \n\t"
        "movq %%mm2, (%2) \n\t"
        : "=m"(tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
          "m"(tc), "m"(*(uint64_t*)mask), "m"(alpha1), "m"(beta1),
          "m"(mm_bone), "m"(ff_pb_3F)
    );
}
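
/* In the wrappers below, tc0[i] < 0 (in practice -1) marks a half-edge
 * that must not be filtered at all; the bitwise AND of two entries is
 * negative only when both are, so a whole call can be skipped. Mixed
 * cases are handled per 4-pixel group by the 'mask' input above. */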

static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}

static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    uint8_t trans[8*8];
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans, pix-4, 8, stride);
        transpose4x4(trans +4*8, pix, 8, stride);
        transpose4x4(trans+4, pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2, trans +2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    uint8_t trans[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\


static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(mm_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    uint8_t trans[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

/***********************************/
/* motion compensation */

#define QPEL_H264V(A,B,C,D,E,F,OP)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t"\
    "psllw $2, %%mm6 \n\t"\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t"\
    "pmullw %4, %%mm6 \n\t"\
    "add %2, %0 \n\t"\
    "punpcklbw %%mm7, "#F" \n\t"\
    "paddw %5, "#A" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", %%mm6 \n\t"\
    "psraw $5, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm6 \n\t"\
    OP(%%mm6, (%1), A, d)\
    "add %3, %1 \n\t"

#define QPEL_H264HV(A,B,C,D,E,F,OF)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t"\
    "psllw $2, %%mm6 \n\t"\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t"\
    "pmullw %3, %%mm6 \n\t"\
    "add %2, %0 \n\t"\
    "punpcklbw %%mm7, "#F" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", %%mm6 \n\t"\
    "movq %%mm6, "#OF"(%1) \n\t"

#define QPEL_H264(OPNAME, OP, MMX)\
static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "1: \n\t"\
        "movd -1(%0), %%mm1 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "movd 1(%0), %%mm3 \n\t"\
        "movd 2(%0), %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "paddw %%mm0, %%mm1 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "movd -2(%0), %%mm0 \n\t"\
        "movd 3(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm3, %%mm0 \n\t"\
        "psllw $2, %%mm2 \n\t"\
        "psubw %%mm1, %%mm2 \n\t"\
        "pmullw %%mm4, %%mm2 \n\t"\
        "paddw %%mm5, %%mm0 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=4;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm4 \n\t"\
        "movq %1, %%mm5 \n\t"\
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
    );\
    do{\
        asm volatile(\
            "movd -1(%0), %%mm1 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "movd 1(%0), %%mm3 \n\t"\
            "movd 2(%0), %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "paddw %%mm0, %%mm1 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "movd -2(%0), %%mm0 \n\t"\
            "movd 3(%0), %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm3, %%mm0 \n\t"\
            "psllw $2, %%mm2 \n\t"\
            "psubw %%mm1, %%mm2 \n\t"\
            "pmullw %%mm4, %%mm2 \n\t"\
            "paddw %%mm5, %%mm0 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "movd (%2), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "packuswb %%mm0, %%mm0 \n\t"\
            PAVGB" %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm6, d)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((long)src2Stride), "S"((long)dstStride)\
            : "memory"\
        );\
    }while(--h);\
}\
static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    asm volatile(\
        "movq %4, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "paddw 10(%0), %%mm0 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "paddw 8(%0), %%mm1 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
        "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0 \n\t"\
        "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm6, %%mm2 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 +32 */\
        "psraw $6, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
    );\
}\
\
static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        "movq -1(%0), %%mm2 \n\t"\
        "movq 2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        "movd -2(%0), %%mm2 \n\t"\
        "movd 7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 1(%0), %%mm2 \n\t"\
            "movq %%mm0, %%mm1 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpckhbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "psllw $2, %%mm0 \n\t"\
            "psllw $2, %%mm1 \n\t"\
            "movq -1(%0), %%mm2 \n\t"\
            "movq 2(%0), %%mm4 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "movq %%mm4, %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            "punpckhbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm4, %%mm2 \n\t"\
            "paddw %%mm3, %%mm5 \n\t"\
            "psubw %%mm2, %%mm0 \n\t"\
            "psubw %%mm5, %%mm1 \n\t"\
            "pmullw %%mm6, %%mm0 \n\t"\
            "pmullw %%mm6, %%mm1 \n\t"\
            "movd -2(%0), %%mm2 \n\t"\
            "movd 7(%0), %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "movq %5, %%mm5 \n\t"\
            "paddw %%mm5, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm4, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "movq (%2), %%mm4 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            PAVGB" %%mm4, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm5, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((long)src2Stride), "S"((long)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
\
static inline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
\
            : "+a"(src), "+c"(dst)\
            : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(h==16){\
            asm volatile(\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
                : "+a"(src), "+c"(dst)\
                : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }\
}\
static inline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    int h = size;\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        if(size==16){\
            asm volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
    tmp -= size+8;\
    w = size>>4;\
    do{\
        h = size;\
        asm volatile(\
            "movq %4, %%mm6 \n\t"\
            "1: \n\t"\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm3 \n\t"\
            "movq 2(%0), %%mm1 \n\t"\
            "movq 10(%0), %%mm4 \n\t"\
            "paddw %%mm4, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "paddw 18(%0), %%mm3 \n\t"\
            "paddw 16(%0), %%mm4 \n\t"\
            "movq 4(%0), %%mm2 \n\t"\
            "movq 12(%0), %%mm5 \n\t"\
            "paddw 6(%0), %%mm2 \n\t"\
            "paddw 14(%0), %%mm5 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "paddsw %%mm2, %%mm0 \n\t"\
            "paddsw %%mm5, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "paddw %%mm6, %%mm2 \n\t"\
            "paddw %%mm6, %%mm5 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm5, %%mm3 \n\t"\
            "psraw $6, %%mm0 \n\t"\
            "psraw $6, %%mm3 \n\t"\
            "packuswb %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+m"(h)\
            : "S"((long)dstStride), "m"(ff_pw_32)\
            : "memory"\
        );\
        tmp += 8 - size*24;\
        dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
}\
\
static void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq %5, %%mm6 \n\t"\
        "movq (%1), %%mm0 \n\t"\
        "movq 24(%1), %%mm1 \n\t"\
        "paddw %%mm6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea (%0,%3,2), %0 \n\t"\
        "lea (%2,%4,2), %2 \n\t"\
        "movq 48(%1), %%mm0 \n\t"\
        "movq 72(%1), %%mm1 \n\t"\
        "paddw %%mm6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\
        :"memory");\
}\
static void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq %0, %%mm6 \n\t"\
        ::"m"(ff_pw_16)\
    );\
    while(h--){\
        asm volatile(\
            "movq (%1), %%mm0 \n\t"\
            "movq 8(%1), %%mm1 \n\t"\
            "paddw %%mm6, %%mm0 \n\t"\
            "paddw %%mm6, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            PAVGB" (%0), %%mm0 \n\t"\
            OP(%%mm0, (%2), %%mm5, q)\
            ::"a"(src8), "c"(src16), "d"(dst)\
            :"memory");\
        src8 += src8Stride;\
        src16 += 24;\
        dst += dstStride;\
    }\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\

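/* H264_MC below generates the 16 quarter-pel motion compensation entry
 * points for one block size: mcXY is a quarter-pel offset of X/4
 * horizontally and Y/4 vertically. Full- and half-pel positions map
 * directly onto the lowpass routines above; the remaining positions
 * average two intermediate planes via the *_l2 helpers, and mc12/mc32
 * reuse the 16-bit hv intermediate through pixels*_l2_shift5. */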

#define H264_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4];\
    int16_t * const tmp= (int16_t*)temp;\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

#define AVG_MMX2_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
#undef PAVGB

H264_MC(put_, 4, 3dnow)
H264_MC(put_, 8, 3dnow)
H264_MC(put_, 16,3dnow)
H264_MC(avg_, 4, 3dnow)
H264_MC(avg_, 8, 3dnow)
H264_MC(avg_, 16,3dnow)
H264_MC(put_, 4, mmx2)
H264_MC(put_, 8, mmx2)
H264_MC(put_, 16,mmx2)
H264_MC(avg_, 4, mmx2)
H264_MC(avg_, 8, mmx2)
H264_MC(avg_, 16,mmx2)

#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
    "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
    "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

/***********************************/
/* weighted prediction */
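
/* Unidirectional weighted prediction is, per the H.264 spec,
 *     dst = clip(((dst*weight + 2^(log2_denom-1)) >> log2_denom) + offset).
 * The code folds the rounding term and the offset into one constant
 * (offset <<= log2_denom; offset += (1 << log2_denom) >> 1) so the inner
 * loop is just pmullw + paddsw + psraw + packuswb. The bidirectional
 * version shifts by log2_denom+1 and uses ((offset + 1) | 1) << log2_denom
 * as the combined offset-plus-rounding addend, matching the C reference. */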

static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    asm volatile(
        "movd %0, %%mm4 \n\t"
        "movd %1, %%mm5 \n\t"
        "movd %2, %%mm6 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm4, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "paddsw %%mm5, %%mm1 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "psraw %%mm6, %%mm1 \n\t"
                "packuswb %%mm7, %%mm0 \n\t"
                "packuswb %%mm7, %%mm1 \n\t"
                "movd %%mm0, %0 \n\t"
                "movd %%mm1, %1 \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}

static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    asm volatile(
        "movd %0, %%mm3 \n\t"
        "movd %1, %%mm4 \n\t"
        "movd %2, %%mm5 \n\t"
        "movd %3, %%mm6 \n\t"
        "pshufw $0, %%mm3, %%mm3 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm3, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm1, %%mm0 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}

#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)