/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/***********************************/
/* IDCT */

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"
#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t"\
    "psraw $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"
#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )
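/* One 4-point H.264 inverse-transform pass: even part from (s02,d02),
 * odd part from (s13,d13), then cross butterflies recombine the four
 * outputs. t is scratch. */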
#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
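/* Each SBUTTERFLY interleaves the low or high halves of two registers;
 * chaining four of them (word, then dword granularity) transposes a
 * 4x4 block of 16-bit words. The letter comments track element order. */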
#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"
void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )
        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )
        "paddw %%mm6, %%mm3 \n\t"
        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )
        "pxor %%mm7, %%mm7 \n\t"
        :: "m"(ff_pw_32));

    asm volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}
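/* For the DC-only case the residual is one value that may be negative.
 * It is split into a non-negative byte vector to add (max(dc,0)) and
 * one to subtract (max(-dc,0)); paddusb/psubusb then give the clip to
 * [0,255] for free. */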
void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "pmaxsw %%mm7, %%mm0 \n\t"
        "pmaxsw %%mm7, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}

void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "pmaxsw %%mm7, %%mm0 \n\t"
        "pmaxsw %%mm7, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        asm volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}
/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    /* a = q0^p0^((p1-q1)>>2) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubb %%mm3, %%mm4 \n\t"\
    "psrlw $2, %%mm4 \n\t"\
    "pxor %%mm1, %%mm4 \n\t"\
    "pxor %%mm2, %%mm4 \n\t"\
    /* b = p0^(q1>>2) */\
    "psrlw $2, %%mm3 \n\t"\
    "pand "#pb_3f", %%mm3 \n\t"\
    "movq %%mm1, %%mm5 \n\t"\
    "pxor %%mm3, %%mm5 \n\t"\
    /* c = q0^(p1>>2) */\
    "psrlw $2, %%mm0 \n\t"\
    "pand "#pb_3f", %%mm0 \n\t"\
    "movq %%mm2, %%mm6 \n\t"\
    "pxor %%mm0, %%mm6 \n\t"\
    /* d = (c^b) & ~(b^a) & 1 */\
    "pxor %%mm5, %%mm6 \n\t"\
    "pxor %%mm4, %%mm5 \n\t"\
    "pandn %%mm6, %%mm5 \n\t"\
    "pand "#pb_01", %%mm5 \n\t"\
    /* delta = (avg(q0, p1>>2) + (d&a))
     * - (avg(p0, q1>>2) + (d&~a)) */\
    "pavgb %%mm2, %%mm0 \n\t"\
    "pand %%mm5, %%mm4 \n\t"\
    "paddusb %%mm4, %%mm0 \n\t"\
    "pavgb %%mm1, %%mm3 \n\t"\
    "pxor %%mm5, %%mm4 \n\t"\
    "paddusb %%mm4, %%mm3 \n\t"\
    /* p0 += clip(delta, -tc0, tc0)
     * q0 -= clip(delta, -tc0, tc0) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubusb %%mm3, %%mm0 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "pminub %%mm7, %%mm0 \n\t"\
    "pminub %%mm7, %%mm3 \n\t"\
    "paddusb %%mm0, %%mm1 \n\t"\
    "paddusb %%mm3, %%mm2 \n\t"\
    "psubusb %%mm3, %%mm1 \n\t"\
    "psubusb %%mm0, %%mm2 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    uint64_t tmp0;
    uint64_t tc = (uint8_t)tc0[1]*0x01010000 | (uint8_t)tc0[0]*0x0101;
    // with luma, tc0=0 doesn't mean no filtering, so we need a separate input mask
    uint32_t mask[2] = { (tc0[0]>=0)*0xffffffff, (tc0[1]>=0)*0xffffffff };

    asm volatile(
        "movq (%1,%3), %%mm0 \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2 \n\t" //q0
        "movq (%2,%3), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)
        "pand %5, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"

        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pandn %%mm7, %%mm6 \n\t"
        "pcmpeqb %%mm7, %%mm6 \n\t"
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pshufw $80, %4, %%mm4 \n\t"
        "pand %%mm7, %%mm4 \n\t" // mask & tc0
        "movq %8, %%mm7 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mask & |p2-p0|<beta & 1
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        "paddb %%mm4, %%mm7 \n\t" // tc++
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pandn %0, %%mm6 \n\t"
        "pcmpeqb %0, %%mm6 \n\t"
        "pand %0, %%mm6 \n\t"
        "pshufw $80, %4, %%mm5 \n\t"
        "pand %%mm6, %%mm5 \n\t"
        "pand %8, %%mm6 \n\t"
        "paddb %%mm6, %%mm7 \n\t"
        "movq (%2,%3), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, %9)
        "movq %%mm1, (%1,%3,2) \n\t"
        "movq %%mm2, (%2) \n\t"
        : "=m"(tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
          "m"(tc), "m"(*(uint64_t*)mask), "m"(alpha1), "m"(beta1),
          "m"(mm_bone), "m"(ff_pb_3F)
    );
}
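// a negative tc0 entry means "don't filter this 4-pixel run"; ANDing
// two entries is negative only if both are, so a call below is skipped
// only when its whole 8-pixel half is disabled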
static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}

static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    uint8_t trans[8*8];
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans, pix-4, 8, stride);
        transpose4x4(trans+4*8, pix, 8, stride);
        transpose4x4(trans+4, pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix+4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2, trans+2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    uint8_t trans[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(mm_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    uint8_t trans[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
/***********************************/
/* motion compensation */

#define QPEL_H264V(A,B,C,D,E,F,OP)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t"\
    "psllw $2, %%mm6 \n\t"\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t"\
    "pmullw %4, %%mm6 \n\t"\
    "add %2, %0 \n\t"\
    "punpcklbw %%mm7, "#F" \n\t"\
    "paddw %5, "#A" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", %%mm6 \n\t"\
    "psraw $5, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm6 \n\t"\
    OP(%%mm6, (%1), A, d)\
    "add %3, %1 \n\t"
#define QPEL_H264HV(A,B,C,D,E,F,OF)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t"\
    "psllw $2, %%mm6 \n\t"\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t"\
    "pmullw %3, %%mm6 \n\t"\
    "add %2, %0 \n\t"\
    "punpcklbw %%mm7, "#F" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", %%mm6 \n\t"\
    "movq %%mm6, "#OF"(%1) \n\t"
#define QPEL_H264(OPNAME, OP, MMX)\
static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "1: \n\t"\
        "movd -1(%0), %%mm1 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "movd 1(%0), %%mm3 \n\t"\
        "movd 2(%0), %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "paddw %%mm0, %%mm1 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "movd -2(%0), %%mm0 \n\t"\
        "movd 3(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm3, %%mm0 \n\t"\
        "psllw $2, %%mm2 \n\t"\
        "psubw %%mm1, %%mm2 \n\t"\
        "pmullw %%mm4, %%mm2 \n\t"\
        "paddw %%mm5, %%mm0 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    asm volatile(\
        "movq %4, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "paddw 10(%0), %%mm0 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "paddw 8(%0), %%mm1 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
        "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0 \n\t"\
        "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm6, %%mm2 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 +32 */\
        "psraw $6, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
    );\
}\
\
static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        "movq -1(%0), %%mm2 \n\t"\
        "movq 2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        "movd -2(%0), %%mm2 \n\t"\
        "movd 7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
\
            : "+a"(src), "+c"(dst)\
            : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(h==16){\
            asm volatile(\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
                : "+a"(src), "+c"(dst)\
                : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }\
}\
static inline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    int h = size;\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        if(size==16){\
            asm volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
    tmp -= size+8;\
    w = size>>4;\
    do{\
        h = size;\
        asm volatile(\
            "movq %4, %%mm6 \n\t"\
            "1: \n\t"\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm3 \n\t"\
            "movq 2(%0), %%mm1 \n\t"\
            "movq 10(%0), %%mm4 \n\t"\
            "paddw %%mm4, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "paddw 18(%0), %%mm3 \n\t"\
            "paddw 16(%0), %%mm4 \n\t"\
            "movq 4(%0), %%mm2 \n\t"\
            "movq 12(%0), %%mm5 \n\t"\
            "paddw 6(%0), %%mm2 \n\t"\
            "paddw 14(%0), %%mm5 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "paddsw %%mm2, %%mm0 \n\t"\
            "paddsw %%mm5, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "paddw %%mm6, %%mm2 \n\t"\
            "paddw %%mm6, %%mm5 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm5, %%mm3 \n\t"\
            "psraw $6, %%mm0 \n\t"\
            "psraw $6, %%mm3 \n\t"\
            "packuswb %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+m"(h)\
            : "S"((long)dstStride), "m"(ff_pw_32)\
            : "memory"\
        );\
        tmp += 8 - size*24;\
        dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\
\
static void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "movq (%1), %%mm0 \n\t"\
        "movq 24(%1), %%mm1 \n\t"\
        "paddw %%mm6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "pmaxsw %%mm7, %%mm0 \n\t"\
        "pmaxsw %%mm7, %%mm1 \n\t"\
        "packuswb %%mm7, %%mm0 \n\t"\
        "packuswb %%mm7, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea (%0,%3,2), %0 \n\t"\
        "lea (%2,%4,2), %2 \n\t"\
        "movq 48(%1), %%mm0 \n\t"\
        "movq 72(%1), %%mm1 \n\t"\
        "paddw %%mm6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "pmaxsw %%mm7, %%mm0 \n\t"\
        "pmaxsw %%mm7, %%mm1 \n\t"\
        "packuswb %%mm7, %%mm0 \n\t"\
        "packuswb %%mm7, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\
        :"memory");\
}\
static void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm6 \n\t"\
        ::"m"(ff_pw_16)\
    );\
    while(h--){\
        asm volatile(\
            "movq (%1), %%mm0 \n\t"\
            "movq 8(%1), %%mm1 \n\t"\
            "paddw %%mm6, %%mm0 \n\t"\
            "paddw %%mm6, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "pmaxsw %%mm7, %%mm0 \n\t"\
            "pmaxsw %%mm7, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            PAVGB" (%0), %%mm0 \n\t"\
            OP(%%mm0, (%2), %%mm5, q)\
            ::"a"(src8), "c"(src16), "d"(dst)\
            :"memory");\
        src8 += src8Stride;\
        src16 += 24;\
        dst += dstStride;\
    }\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\

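/* H264_MC expands to the 16 quarter-pel functions _mcXY for one block
 * size, where X and Y are the quarter-sample offsets: mc00 is a plain
 * copy, mc20/mc02 are the pure h/v 6-tap half-pel filters, mc22 is the
 * 2-D one, and the remaining positions average two intermediates. */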
#define H264_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4];\
    int16_t * const tmp= (int16_t*)temp;\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"

#define AVG_3DNOW_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

#define AVG_MMX2_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
#undef PAVGB

H264_MC(put_, 4, 3dnow)
H264_MC(put_, 8, 3dnow)
H264_MC(put_, 16,3dnow)
H264_MC(avg_, 4, 3dnow)
H264_MC(avg_, 8, 3dnow)
H264_MC(avg_, 16,3dnow)
H264_MC(put_, 4, mmx2)
H264_MC(put_, 8, mmx2)
H264_MC(put_, 16,mmx2)
H264_MC(avg_, 4, mmx2)
H264_MC(avg_, 8, mmx2)
H264_MC(avg_, 16,mmx2)
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
/***********************************/
/* weighted prediction */

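/* weight: dst = clip(((dst*weight + 2^(log2_denom-1)) >> log2_denom) + offset);
 * the offset is pre-shifted and merged with the rounding term so one
 * paddsw before the shift does both.
 * biweight: dst = (dst*weightd + src*weights + rnd) >> (log2_denom+1),
 * where the odd addend rnd = ((offset+1)|1) << log2_denom makes the
 * shift add exactly (offset+1)>>1 on top of the rounded average. */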
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    asm volatile(
        "movd %0, %%mm4 \n\t"
        "movd %1, %%mm5 \n\t"
        "movd %2, %%mm6 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm4, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "paddsw %%mm5, %%mm1 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "psraw %%mm6, %%mm1 \n\t"
                "packuswb %%mm7, %%mm0 \n\t"
                "packuswb %%mm7, %%mm1 \n\t"
                "movd %%mm0, %0 \n\t"
                "movd %%mm1, %1 \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}

static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    asm volatile(
        "movd %0, %%mm3 \n\t"
        "movd %1, %%mm4 \n\t"
        "movd %2, %%mm5 \n\t"
        "movd %3, %%mm6 \n\t"
        "pshufw $0, %%mm3, %%mm3 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm3, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm1, %%mm0 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}

#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)