/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "dsputil_mmx.h"

/***********************************/
/* IDCT */

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t"\
    "psraw $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"

static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )
        "paddw %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7 \n\t"
        :: "m"(ff_pw_32));

    asm volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}
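
/* For reference, a scalar sketch of what the routine above computes
 * (illustrative only, not part of the build; the function name is
 * hypothetical, av_clip_uint8() is the usual libavutil helper):
 *
 *     static void idct4_add_ref(uint8_t *dst, int16_t *block, int stride)
 *     {
 *         int i, tmp[16];
 *         for(i=0; i<4; i++){ // horizontal pass
 *             const int16_t *s = block + 4*i;
 *             int z0 = s[0] + s[2];
 *             int z1 = s[0] - s[2];
 *             int z2 = (s[1]>>1) - s[3];
 *             int z3 = s[1] + (s[3]>>1);
 *             tmp[4*i+0] = z0+z3; tmp[4*i+1] = z1+z2;
 *             tmp[4*i+2] = z1-z2; tmp[4*i+3] = z0-z3;
 *         }
 *         for(i=0; i<4; i++){ // vertical pass, then add to dst with clipping
 *             int z0 = tmp[i] + tmp[8+i];
 *             int z1 = tmp[i] - tmp[8+i];
 *             int z2 = (tmp[4+i]>>1) - tmp[12+i];
 *             int z3 = tmp[4+i] + (tmp[12+i]>>1);
 *             dst[0*stride+i] = av_clip_uint8(dst[0*stride+i] + ((z0+z3+32)>>6));
 *             dst[1*stride+i] = av_clip_uint8(dst[1*stride+i] + ((z1+z2+32)>>6));
 *             dst[2*stride+i] = av_clip_uint8(dst[2*stride+i] + ((z1-z2+32)>>6));
 *             dst[3*stride+i] = av_clip_uint8(dst[3*stride+i] + ((z0-z3+32)>>6));
 *         }
 *     }
 */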

static inline void h264_idct8_1d(int16_t *block)
{
    asm volatile(
        "movq 112(%0), %%mm7 \n\t"
        "movq 80(%0), %%mm0 \n\t"
        "movq 48(%0), %%mm3 \n\t"
        "movq 16(%0), %%mm5 \n\t"
        "movq %%mm0, %%mm4 \n\t"
        "movq %%mm5, %%mm1 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm1 \n\t"
        "paddw %%mm0, %%mm4 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psubw %%mm5, %%mm4 \n\t"
        "paddw %%mm3, %%mm1 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psraw $1, %%mm3 \n\t"
        "psraw $1, %%mm7 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "movq %%mm4, %%mm3 \n\t"
        "movq %%mm1, %%mm7 \n\t"
        "psraw $2, %%mm1 \n\t"
        "psraw $2, %%mm3 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "psraw $2, %%mm5 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psraw $2, %%mm0 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"
        "movq 32(%0), %%mm2 \n\t"
        "movq 96(%0), %%mm6 \n\t"
        "movq %%mm2, %%mm4 \n\t"
        "movq %%mm6, %%mm0 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"
        "movq (%0), %%mm2 \n\t"
        "movq 64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}
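
/* The 1-D 8-point transform above keeps all eight coefficients of a
 * column set live in the eight MMX registers, so one call processes a
 * 4-sample-wide slice of the 8x8 block; the caller below runs it twice
 * per pass and transposes in between. */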

static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        asm volatile(
            "movq %%mm7, %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0, 8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7, (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        asm volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"
            "movq %%mm7, (%0) \n\t"
            "movq %%mm5, 16(%0) \n\t"
            "movq %%mm3, 32(%0) \n\t"
            "movq %%mm1, 48(%0) \n\t"
            "movq %%mm0, 64(%0) \n\t"
            "movq %%mm2, 80(%0) \n\t"
            "movq %%mm4, 96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}

#define STORE_DIFF_8P( p, d, t, z )\
    "movq "#d", "#t" \n"\
    "psraw $6, "#p" \n"\
    "punpcklbw "#z", "#t" \n"\
    "paddsw "#t", "#p" \n"\
    "packuswb "#p", "#p" \n"\
    "movq "#p", "#d" \n"

#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
    "movdqa "#c", "#a" \n"\
    "movdqa "#g", "#e" \n"\
    "psraw $1, "#c" \n"\
    "psraw $1, "#g" \n"\
    "psubw "#e", "#c" \n"\
    "paddw "#a", "#g" \n"\
    "movdqa "#b", "#e" \n"\
    "psraw $1, "#e" \n"\
    "paddw "#b", "#e" \n"\
    "paddw "#d", "#e" \n"\
    "paddw "#f", "#e" \n"\
    "movdqa "#f", "#a" \n"\
    "psraw $1, "#a" \n"\
    "paddw "#f", "#a" \n"\
    "paddw "#h", "#a" \n"\
    "psubw "#b", "#a" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#d", "#f" \n"\
    "paddw "#h", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "psraw $1, "#d" \n"\
    "psraw $1, "#h" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "movdqa "#e", "#d" \n"\
    "movdqa "#a", "#h" \n"\
    "psraw $2, "#d" \n"\
    "psraw $2, "#h" \n"\
    "paddw "#f", "#d" \n"\
    "paddw "#b", "#h" \n"\
    "psraw $2, "#f" \n"\
    "psraw $2, "#b" \n"\
    "psubw "#f", "#e" \n"\
    "psubw "#a", "#b" \n"\
    "movdqa 0x00(%1), "#a" \n"\
    "movdqa 0x40(%1), "#f" \n"\
    SUMSUB_BA(f, a)\
    SUMSUB_BA(g, f)\
    SUMSUB_BA(c, a)\
    SUMSUB_BA(e, g)\
    SUMSUB_BA(b, c)\
    SUMSUB_BA(h, a)\
    SUMSUB_BA(d, f)
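
/* The SSE2 variant holds each 8-coefficient row in one XMM register, so
 * a single expansion of H264_IDCT8_1D_SSE2 transforms the whole block.
 * Rows 0 and 4 (a and f) are fetched from memory only at the end
 * because those two registers serve as scratch during the odd-part
 * computation above. */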

static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    asm volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw %4, %%xmm4 \n"
        "movdqa %%xmm4, 0x00(%1) \n"
        "movdqa %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa %%xmm6, 0x60(%1) \n"
        "movdqa %%xmm7, 0x70(%1) \n"
        "pxor %%xmm7, %%xmm7 \n"
        STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((long)stride), "r"(3L*stride), "m"(ff_pw_32)
    );
}

static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}
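
/* In the DC-only case the whole IDCT collapses to adding one constant.
 * There is no signed saturating byte add, so the code adds max(dc,0)
 * with paddusb and subtracts max(-dc,0) with psubusb (both produced by
 * the packuswb pair above), which together clip to 0..255.  Scalar
 * sketch (illustrative only):
 *
 *     int x, y, dc = (block[0] + 32) >> 6;
 *     for(y=0; y<4; y++)
 *         for(x=0; x<4; x++)
 *             dst[y*stride+x] = av_clip_uint8(dst[y*stride+x] + dc);
 */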

static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        asm volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}

/***********************************/
/* deblocking */

// out: o = nonzero where |x-y| > a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// out: o = 0xff where |x-y| <= a, 0 where |x-y| > a
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "psubusb "#a", "#t" \n\t"\
    "psubusb "#a", "#o" \n\t"\
    "pcmpeqb "#t", "#o" \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq %%mm1 , %%mm5 \n\t"\
    "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
    "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
    "pcmpeqb %%mm4 , %%mm4 \n\t"\
    "pxor %%mm4 , %%mm3 \n\t"\
    "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
    "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
    "pxor %%mm1 , %%mm4 \n\t"\
    "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
    "pavgb %%mm5 , %%mm3 \n\t"\
    "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
    "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
    "psubusb %%mm3 , %%mm6 \n\t"\
    "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
    "pminub %%mm7 , %%mm6 \n\t"\
    "pminub %%mm7 , %%mm3 \n\t"\
    "psubusb %%mm6 , %%mm1 \n\t"\
    "psubusb %%mm3 , %%mm2 \n\t"\
    "paddusb %%mm3 , %%mm1 \n\t"\
    "paddusb %%mm6 , %%mm2 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"

static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED_8(uint64_t, tmp0[2]);

    asm volatile(
        "movq (%1,%3), %%mm0 \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2 \n\t" //q0
        "movq (%2,%3), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)

        "movd %5, %%mm4 \n\t"
        "punpcklbw %%mm4, %%mm4 \n\t"
        "punpcklwd %%mm4, %%mm4 \n\t"
        "pcmpeqb %%mm3, %%mm3 \n\t"
        "movq %%mm4, %%mm6 \n\t"
        "pcmpgtb %%mm3, %%mm4 \n\t"
        "movq %%mm6, 8+%0 \n\t"
        "pand %%mm4, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"

        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pand 8+%0, %%mm7 \n\t" // mask & tc0
        "movq %%mm7, %%mm4 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand %0, %%mm6 \n\t"
        "movq 8+%0, %%mm5 \n\t" // could be merged with the pand below, but is slower that way
        "pand %%mm6, %%mm5 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "movq (%2,%3), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, unused)
        "movq %%mm1, (%1,%3,2) \n\t"
        "movq %%mm2, (%2) \n\t"

        : "=m"(*tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
          "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
          "m"(ff_bone)
    );
}

static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}

static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans, pix-4, 8, stride);
        transpose4x4(trans+4*8, pix, 8, stride);
        transpose4x4(trans+4, pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix+4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2, trans+2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(ff_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1 ) {
    int dir;
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movq %0, %%mm6 \n\t"
        "movq %1, %%mm5 \n\t"
        "movq %2, %%mm4 \n\t"
        ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
    );
    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const int d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge, l;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            asm volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                asm volatile("pxor %%mm0, %%mm0 \n\t":);
                for( l = bidir; l >= 0; l-- ) {
                    asm volatile(
                        "movd %0, %%mm1 \n\t"
                        "punpckldq %1, %%mm1 \n\t"
                        "movq %%mm1, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        "pand %%mm6, %%mm2 \n\t"
                        "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
                        "punpckldq %%mm1, %%mm2 \n\t"
                        "pcmpeqb %%mm2, %%mm1 \n\t"
                        "paddb %%mm6, %%mm1 \n\t"
                        "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
                        "por %%mm1, %%mm0 \n\t"

                        "movq %2, %%mm1 \n\t"
                        "movq %3, %%mm2 \n\t"
                        "psubw %4, %%mm1 \n\t"
                        "psubw %5, %%mm2 \n\t"
                        "packsswb %%mm2, %%mm1 \n\t"
                        "paddb %%mm5, %%mm1 \n\t"
                        "pminub %%mm4, %%mm1 \n\t"
                        "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
                        "por %%mm1, %%mm0 \n\t"
                        ::"m"(ref[l][b_idx]),
                          "m"(ref[l][b_idx+d_idx]),
                          "m"(mv[l][b_idx][0]),
                          "m"(mv[l][b_idx+2][0]),
                          "m"(mv[l][b_idx+d_idx][0]),
                          "m"(mv[l][b_idx+d_idx+2][0])
                    );
                }
            }
            asm volatile(
                "movd %0, %%mm1 \n\t"
                "por %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            asm volatile(
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "psrlw $15, %%mm0 \n\t" // nonzero -> 1
                "psrlw $14, %%mm1 \n\t"
                "movq %%mm0, %%mm2 \n\t"
                "por %%mm1, %%mm2 \n\t"
                "psrlw $1, %%mm1 \n\t"
                "pandn %%mm2, %%mm1 \n\t"
                "movq %%mm1, %0 \n\t"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0, (%0) \n\t"
        "movq %%mm3, 8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
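
/* h264_loop_filter_strength computes, per 4-pixel edge segment, the
 * boundary strength used by the loop filter: 2 if either side of the
 * edge has nonzero coefficients (the nnz test), otherwise 1 if the two
 * sides use different references or their motion vectors differ by 4 or
 * more quarter-pel units (the ref/mv tests), otherwise 0.  The dir==0
 * strengths are gathered column-wise, hence the final TRANSPOSE4 of
 * bS[0]. */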

/***********************************/
/* motion compensation */

#define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %4, "#T" \n\t"\
    "paddw %5, "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", "#T" \n\t"\
    "psraw $5, "#T" \n\t"\
    "packuswb "#T", "#T" \n\t"\
    OP(T, (%1), A, d)\
    "add %3, %1 \n\t"

#define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "paddw %4, "#A" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %3, "#T" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#A", "#T" \n\t"\
    "mov"#q" "#T", "#OF"(%1) \n\t"

#define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
#define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
#define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
#define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)

#define QPEL_H264(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "1: \n\t"\
        "movd -1(%0), %%mm1 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "movd 1(%0), %%mm3 \n\t"\
        "movd 2(%0), %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "paddw %%mm0, %%mm1 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "movd -2(%0), %%mm0 \n\t"\
        "movd 3(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm3, %%mm0 \n\t"\
        "psllw $2, %%mm2 \n\t"\
        "psubw %%mm1, %%mm2 \n\t"\
        "pmullw %%mm4, %%mm2 \n\t"\
        "paddw %%mm5, %%mm0 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=4;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm4 \n\t"\
        "movq %1, %%mm5 \n\t"\
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
    );\
    do{\
        asm volatile(\
            "movd -1(%0), %%mm1 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "movd 1(%0), %%mm3 \n\t"\
            "movd 2(%0), %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "paddw %%mm0, %%mm1 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "movd -2(%0), %%mm0 \n\t"\
            "movd 3(%0), %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm3, %%mm0 \n\t"\
            "psllw $2, %%mm2 \n\t"\
            "psubw %%mm1, %%mm2 \n\t"\
            "pmullw %%mm4, %%mm2 \n\t"\
            "paddw %%mm5, %%mm0 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "movd (%2), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "packuswb %%mm0, %%mm0 \n\t"\
            PAVGB" %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm6, d)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((long)src2Stride), "S"((long)dstStride)\
            : "memory"\
        );\
    }while(--h);\
}\
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    asm volatile(\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "paddw 10(%0), %%mm0 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "paddw 8(%0), %%mm1 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
        "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0 \n\t"\
        "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\
        "psraw $6, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+g"(h)\
        : "S"((long)dstStride)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        "movq -1(%0), %%mm2 \n\t"\
        "movq 2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        "movd -2(%0), %%mm2 \n\t"\
        "movd 7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        asm volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 1(%0), %%mm2 \n\t"\
            "movq %%mm0, %%mm1 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpckhbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "psllw $2, %%mm0 \n\t"\
            "psllw $2, %%mm1 \n\t"\
            "movq -1(%0), %%mm2 \n\t"\
            "movq 2(%0), %%mm4 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "movq %%mm4, %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            "punpckhbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm4, %%mm2 \n\t"\
            "paddw %%mm3, %%mm5 \n\t"\
            "psubw %%mm2, %%mm0 \n\t"\
            "psubw %%mm5, %%mm1 \n\t"\
            "pmullw %%mm6, %%mm0 \n\t"\
            "pmullw %%mm6, %%mm1 \n\t"\
            "movd -2(%0), %%mm2 \n\t"\
            "movd 7(%0), %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "movq %5, %%mm5 \n\t"\
            "paddw %%mm5, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm4, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "movq (%2), %%mm4 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            PAVGB" %%mm4, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm5, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((long)src2Stride), "S"((long)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
\
            : "+a"(src), "+c"(dst)\
            : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(h==16){\
            asm volatile(\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
                : "+a"(src), "+c"(dst)\
                : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(size==16){\
            asm volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int w = size>>4;\
    do{\
        int h = size;\
        asm volatile(\
            "1: \n\t"\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm3 \n\t"\
            "movq 2(%0), %%mm1 \n\t"\
            "movq 10(%0), %%mm4 \n\t"\
            "paddw %%mm4, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "paddw 18(%0), %%mm3 \n\t"\
            "paddw 16(%0), %%mm4 \n\t"\
            "movq 4(%0), %%mm2 \n\t"\
            "movq 12(%0), %%mm5 \n\t"\
            "paddw 6(%0), %%mm2 \n\t"\
            "paddw 14(%0), %%mm5 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "paddsw %%mm2, %%mm0 \n\t"\
            "paddsw %%mm5, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm5, %%mm3 \n\t"\
            "psraw $6, %%mm0 \n\t"\
            "psraw $6, %%mm3 \n\t"\
            "packuswb %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((long)dstStride)\
            : "memory"\
        );\
        tmp += 8 - size*24;\
        dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq (%1), %%mm0 \n\t"\
        "movq 24(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea (%0,%3,2), %0 \n\t"\
        "lea (%2,%4,2), %2 \n\t"\
        "movq 48(%1), %%mm0 \n\t"\
        "movq 72(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((long)src8Stride), "D"((long)dstStride)\
        :"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    do{\
        asm volatile(\
            "movq (%1), %%mm0 \n\t"\
            "movq 8(%1), %%mm1 \n\t"\
            "movq 48(%1), %%mm2 \n\t"\
            "movq 8+48(%1), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "psraw $5, %%mm2 \n\t"\
            "psraw $5, %%mm3 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            "packuswb %%mm3, %%mm2 \n\t"\
            PAVGB" (%0), %%mm0 \n\t"\
            PAVGB" (%0,%3), %%mm2 \n\t"\
            OP(%%mm0, (%2), %%mm5, q)\
            OP(%%mm2, (%2,%4), %%mm5, q)\
            ::"a"(src8), "c"(src16), "d"(dst),\
              "r"((long)src8Stride), "r"((long)dstStride)\
            :"memory");\
        src8 += 2L*src8Stride;\
        src16 += 48;\
        dst += 2L*dstStride;\
    }while(h-=2);\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}
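
/* The pixels*_l2_shift5 helpers take the 16-bit intermediates left by
 * the hv1 pass (still scaled by 32), shift them down, and pavgb the
 * result with a second 8-bit prediction; the mixed quarter-pel
 * positions are assembled this way.  The fixed byte offsets (24 or 48
 * per row) match the row pitch of the respective intermediate buffers. */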

#ifdef ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=16;\
    asm volatile(\
        "pxor %%xmm15, %%xmm15 \n\t"\
        "movdqa %6, %%xmm14 \n\t"\
        "movdqa %7, %%xmm13 \n\t"\
        "1: \n\t"\
        "lddqu 3(%0), %%xmm1 \n\t"\
        "lddqu -5(%0), %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm15, %%xmm1 \n\t"\
        "punpcklbw %%xmm15, %%xmm0 \n\t"\
        "punpcklbw %%xmm15, %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm0, %%xmm6 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm0, %%xmm8 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm0, %%xmm9 \n\t"\
        "movdqa %%xmm1, %%xmm5 \n\t"\
        "movdqa %%xmm0, %%xmm10 \n\t"\
        "palignr $6, %%xmm0, %%xmm5 \n\t"\
        "palignr $6, %%xmm7, %%xmm10 \n\t"\
        "palignr $8, %%xmm0, %%xmm4 \n\t"\
        "palignr $8, %%xmm7, %%xmm9 \n\t"\
        "palignr $10,%%xmm0, %%xmm3 \n\t"\
        "palignr $10,%%xmm7, %%xmm8 \n\t"\
        "paddw %%xmm1, %%xmm5 \n\t"\
        "paddw %%xmm0, %%xmm10 \n\t"\
        "palignr $12,%%xmm0, %%xmm2 \n\t"\
        "palignr $12,%%xmm7, %%xmm6 \n\t"\
        "palignr $14,%%xmm0, %%xmm1 \n\t"\
        "palignr $14,%%xmm7, %%xmm0 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm8, %%xmm6 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "paddw %%xmm9, %%xmm0 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psllw $2, %%xmm6 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "psubw %%xmm0, %%xmm6 \n\t"\
        "paddw %%xmm13,%%xmm5 \n\t"\
        "paddw %%xmm13,%%xmm10 \n\t"\
        "pmullw %%xmm14,%%xmm2 \n\t"\
        "pmullw %%xmm14,%%xmm6 \n\t"\
        "lddqu (%2), %%xmm3 \n\t"\
        "paddw %%xmm5, %%xmm2 \n\t"\
        "paddw %%xmm10,%%xmm6 \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "psraw $5, %%xmm6 \n\t"\
        "packuswb %%xmm2,%%xmm6 \n\t"\
        "pavgb %%xmm3, %%xmm6 \n\t"\
        OP(%%xmm6, (%1), %%xmm4, dqa)\
        "add %5, %0 \n\t"\
        "add %5, %1 \n\t"\
        "add %4, %2 \n\t"\
        "dec %3 \n\t"\
        "jg 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
        : "D"((long)src2Stride), "S"((long)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64
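
/* palignr is an SSSE3 instruction, so the _XMM horizontal kernels below
 * are presumably only instantiated for the SSSE3 function set: one
 * unaligned lddqu plus byte-rotated register copies replaces the six
 * overlapping loads of the MMX version, and the x86_64 variant above
 * additionally uses xmm8-xmm15 to filter a full 16-pixel row per
 * iteration. */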
  1326. #define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
  1327. static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
  1328. int h=8;\
  1329. asm volatile(\
  1330. "pxor %%xmm7, %%xmm7 \n\t"\
  1331. "movdqa %0, %%xmm6 \n\t"\
  1332. :: "m"(ff_pw_5)\
  1333. );\
  1334. do{\
  1335. asm volatile(\
  1336. "lddqu -5(%0), %%xmm1 \n\t"\
  1337. "movdqa %%xmm1, %%xmm0 \n\t"\
  1338. "punpckhbw %%xmm7, %%xmm1 \n\t"\
  1339. "punpcklbw %%xmm7, %%xmm0 \n\t"\
  1340. "movdqa %%xmm1, %%xmm2 \n\t"\
  1341. "movdqa %%xmm1, %%xmm3 \n\t"\
  1342. "movdqa %%xmm1, %%xmm4 \n\t"\
  1343. "movdqa %%xmm1, %%xmm5 \n\t"\
  1344. "palignr $6, %%xmm0, %%xmm5 \n\t"\
  1345. "palignr $8, %%xmm0, %%xmm4 \n\t"\
  1346. "palignr $10,%%xmm0, %%xmm3 \n\t"\
  1347. "paddw %%xmm1, %%xmm5 \n\t"\
  1348. "palignr $12,%%xmm0, %%xmm2 \n\t"\
  1349. "palignr $14,%%xmm0, %%xmm1 \n\t"\
  1350. "paddw %%xmm3, %%xmm2 \n\t"\
  1351. "paddw %%xmm4, %%xmm1 \n\t"\
  1352. "psllw $2, %%xmm2 \n\t"\
  1353. "movq (%2), %%xmm3 \n\t"\
  1354. "psubw %%xmm1, %%xmm2 \n\t"\
  1355. "paddw %5, %%xmm5 \n\t"\
  1356. "pmullw %%xmm6, %%xmm2 \n\t"\
  1357. "paddw %%xmm5, %%xmm2 \n\t"\
  1358. "psraw $5, %%xmm2 \n\t"\
  1359. "packuswb %%xmm2, %%xmm2 \n\t"\
  1360. "pavgb %%xmm3, %%xmm2 \n\t"\
  1361. OP(%%xmm2, (%1), %%xmm4, q)\
  1362. "add %4, %0 \n\t"\
  1363. "add %4, %1 \n\t"\
  1364. "add %3, %2 \n\t"\
  1365. : "+a"(src), "+c"(dst), "+d"(src2)\
  1366. : "D"((long)src2Stride), "S"((long)dstStride),\
  1367. "m"(ff_pw_16)\
  1368. : "memory"\
  1369. );\
  1370. }while(--h);\
  1371. }\
  1372. QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
  1373. \
  1374. static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1375. int h=8;\
  1376. asm volatile(\
  1377. "pxor %%xmm7, %%xmm7 \n\t"\
  1378. "movdqa %5, %%xmm6 \n\t"\
  1379. "1: \n\t"\
  1380. "lddqu -5(%0), %%xmm1 \n\t"\
  1381. "movdqa %%xmm1, %%xmm0 \n\t"\
  1382. "punpckhbw %%xmm7, %%xmm1 \n\t"\
  1383. "punpcklbw %%xmm7, %%xmm0 \n\t"\
  1384. "movdqa %%xmm1, %%xmm2 \n\t"\
  1385. "movdqa %%xmm1, %%xmm3 \n\t"\
  1386. "movdqa %%xmm1, %%xmm4 \n\t"\
  1387. "movdqa %%xmm1, %%xmm5 \n\t"\
  1388. "palignr $6, %%xmm0, %%xmm5 \n\t"\
  1389. "palignr $8, %%xmm0, %%xmm4 \n\t"\
  1390. "palignr $10,%%xmm0, %%xmm3 \n\t"\
  1391. "paddw %%xmm1, %%xmm5 \n\t"\
  1392. "palignr $12,%%xmm0, %%xmm2 \n\t"\
  1393. "palignr $14,%%xmm0, %%xmm1 \n\t"\
  1394. "paddw %%xmm3, %%xmm2 \n\t"\
  1395. "paddw %%xmm4, %%xmm1 \n\t"\
  1396. "psllw $2, %%xmm2 \n\t"\
  1397. "psubw %%xmm1, %%xmm2 \n\t"\
  1398. "paddw %6, %%xmm5 \n\t"\
  1399. "pmullw %%xmm6, %%xmm2 \n\t"\
  1400. "paddw %%xmm5, %%xmm2 \n\t"\
  1401. "psraw $5, %%xmm2 \n\t"\
  1402. "packuswb %%xmm2, %%xmm2 \n\t"\
  1403. OP(%%xmm2, (%1), %%xmm4, q)\
  1404. "add %3, %0 \n\t"\
  1405. "add %4, %1 \n\t"\
  1406. "decl %2 \n\t"\
  1407. " jnz 1b \n\t"\
  1408. : "+a"(src), "+c"(dst), "+g"(h)\
  1409. : "D"((long)srcStride), "S"((long)dstStride),\
  1410. "m"(ff_pw_5), "m"(ff_pw_16)\
  1411. : "memory"\
  1412. );\
  1413. }\
  1414. static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1415. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  1416. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  1417. src += 8*srcStride;\
  1418. dst += 8*dstStride;\
  1419. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  1420. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  1421. }\
  1422. #define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
  1423. static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1424. src -= 2*srcStride;\
  1425. \
  1426. asm volatile(\
  1427. "pxor %%xmm7, %%xmm7 \n\t"\
  1428. "movq (%0), %%xmm0 \n\t"\
  1429. "add %2, %0 \n\t"\
  1430. "movq (%0), %%xmm1 \n\t"\
  1431. "add %2, %0 \n\t"\
  1432. "movq (%0), %%xmm2 \n\t"\
  1433. "add %2, %0 \n\t"\
  1434. "movq (%0), %%xmm3 \n\t"\
  1435. "add %2, %0 \n\t"\
  1436. "movq (%0), %%xmm4 \n\t"\
  1437. "add %2, %0 \n\t"\
  1438. "punpcklbw %%xmm7, %%xmm0 \n\t"\
  1439. "punpcklbw %%xmm7, %%xmm1 \n\t"\
  1440. "punpcklbw %%xmm7, %%xmm2 \n\t"\
  1441. "punpcklbw %%xmm7, %%xmm3 \n\t"\
  1442. "punpcklbw %%xmm7, %%xmm4 \n\t"\
  1443. QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
  1444. QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
  1445. QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
  1446. QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
  1447. QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
  1448. QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
  1449. QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
  1450. QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
  1451. \
  1452. : "+a"(src), "+c"(dst)\
  1453. : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  1454. : "memory"\
  1455. );\
  1456. if(h==16){\
  1457. asm volatile(\
  1458. QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
  1459. QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
  1460. QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
  1461. QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
  1462. QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
  1463. QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
  1464. QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
  1465. QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
  1466. \
  1467. : "+a"(src), "+c"(dst)\
  1468. : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  1469. : "memory"\
  1470. );\
  1471. }\
  1472. }\
  1473. static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1474. OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
  1475. }\
  1476. static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1477. OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
  1478. OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
  1479. }
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
    int w = (size+8)>>3;
    src -= 2*srcStride+2;
    while(w--){
        asm volatile(
            "pxor %%xmm7, %%xmm7        \n\t"
            "movq (%0), %%xmm0          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm1          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm2          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm3          \n\t"
            "add %2, %0                 \n\t"
            "movq (%0), %%xmm4          \n\t"
            "add %2, %0                 \n\t"
            "punpcklbw %%xmm7, %%xmm0   \n\t"
            "punpcklbw %%xmm7, %%xmm1   \n\t"
            "punpcklbw %%xmm7, %%xmm2   \n\t"
            "punpcklbw %%xmm7, %%xmm3   \n\t"
            "punpcklbw %%xmm7, %%xmm4   \n\t"
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5,  0*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0,  1*48)
            QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1,  2*48)
            QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2,  3*48)
            QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3,  4*48)
            QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4,  5*48)
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5,  6*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0,  7*48)
            : "+a"(src)
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
            : "memory"
        );
        if(size==16){
            asm volatile(
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1,  8*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2,  9*48)
                QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
                QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
                QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
                QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
                : "+a"(src)
                : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
                : "memory"
            );
        }
        tmp += 8;
        src += 8 - (size+5)*srcStride;
    }
}
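
/* Second pass: apply the same 6-tap kernel horizontally to the 16-bit
 * intermediate. The multiply-free sequence below relies on the identity
 *     (a - 5*b + 20*c) / 16 == ((((a-b)>>2) - b + c) >> 2) + c
 * (exact up to shift truncation, with a = x0+x5, b = x1+x4, c = x2+x3),
 * and the final psraw $6 then completes the 2-D normalization before
 * packing back to 8-bit pixels. palignr, an SSSE3 instruction, extracts
 * the five shifted copies of each row without unaligned loads; this is
 * why the plain SSE2 build aliases hv2 back to the MMX2 version below. */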
#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int h = size;\
    if(size == 16){\
        asm volatile(\
            "1:                              \n\t"\
            "movdqa 32(%0), %%xmm4           \n\t"\
            "movdqa 16(%0), %%xmm5           \n\t"\
            "movdqa (%0), %%xmm7             \n\t"\
            "movdqa %%xmm4, %%xmm3           \n\t"\
            "movdqa %%xmm4, %%xmm2           \n\t"\
            "movdqa %%xmm4, %%xmm1           \n\t"\
            "movdqa %%xmm4, %%xmm0           \n\t"\
            "palignr $10, %%xmm5, %%xmm0     \n\t"\
            "palignr  $8, %%xmm5, %%xmm1     \n\t"\
            "palignr  $6, %%xmm5, %%xmm2     \n\t"\
            "palignr  $4, %%xmm5, %%xmm3     \n\t"\
            "palignr  $2, %%xmm5, %%xmm4     \n\t"\
            "paddw  %%xmm5, %%xmm0           \n\t"\
            "paddw  %%xmm4, %%xmm1           \n\t"\
            "paddw  %%xmm3, %%xmm2           \n\t"\
            "movdqa %%xmm5, %%xmm6           \n\t"\
            "movdqa %%xmm5, %%xmm4           \n\t"\
            "movdqa %%xmm5, %%xmm3           \n\t"\
            "palignr  $8, %%xmm7, %%xmm4     \n\t"\
            "palignr  $2, %%xmm7, %%xmm6     \n\t"\
            "palignr $10, %%xmm7, %%xmm3     \n\t"\
            "paddw  %%xmm6, %%xmm4           \n\t"\
            "movdqa %%xmm5, %%xmm6           \n\t"\
            "palignr  $6, %%xmm7, %%xmm5     \n\t"\
            "palignr  $4, %%xmm7, %%xmm6     \n\t"\
            "paddw  %%xmm7, %%xmm3           \n\t"\
            "paddw  %%xmm6, %%xmm5           \n\t"\
            \
            "psubw  %%xmm1, %%xmm0           \n\t"\
            "psubw  %%xmm4, %%xmm3           \n\t"\
            "psraw      $2, %%xmm0           \n\t"\
            "psraw      $2, %%xmm3           \n\t"\
            "psubw  %%xmm1, %%xmm0           \n\t"\
            "psubw  %%xmm4, %%xmm3           \n\t"\
            "paddw  %%xmm2, %%xmm0           \n\t"\
            "paddw  %%xmm5, %%xmm3           \n\t"\
            "psraw      $2, %%xmm0           \n\t"\
            "psraw      $2, %%xmm3           \n\t"\
            "paddw  %%xmm2, %%xmm0           \n\t"\
            "paddw  %%xmm5, %%xmm3           \n\t"\
            "psraw      $6, %%xmm0           \n\t"\
            "psraw      $6, %%xmm3           \n\t"\
            "packuswb %%xmm0, %%xmm3         \n\t"\
            OP(%%xmm3, (%1), %%xmm7, dqa)\
            "add    $48, %0                  \n\t"\
            "add    %3, %1                   \n\t"\
            "decl   %2                       \n\t"\
            " jnz   1b                       \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((long)dstStride)\
            : "memory"\
        );\
    }else{\
        asm volatile(\
            "1:                            \n\t"\
            "movdqa 16(%0), %%xmm1         \n\t"\
            "movdqa (%0), %%xmm0           \n\t"\
            "movdqa %%xmm1, %%xmm2         \n\t"\
            "movdqa %%xmm1, %%xmm3         \n\t"\
            "movdqa %%xmm1, %%xmm4         \n\t"\
            "movdqa %%xmm1, %%xmm5         \n\t"\
            "palignr $10, %%xmm0, %%xmm5   \n\t"\
            "palignr  $8, %%xmm0, %%xmm4   \n\t"\
            "palignr  $6, %%xmm0, %%xmm3   \n\t"\
            "palignr  $4, %%xmm0, %%xmm2   \n\t"\
            "palignr  $2, %%xmm0, %%xmm1   \n\t"\
            "paddw  %%xmm5, %%xmm0         \n\t"\
            "paddw  %%xmm4, %%xmm1         \n\t"\
            "paddw  %%xmm3, %%xmm2         \n\t"\
            "psubw  %%xmm1, %%xmm0         \n\t"\
            "psraw      $2, %%xmm0         \n\t"\
            "psubw  %%xmm1, %%xmm0         \n\t"\
            "paddw  %%xmm2, %%xmm0         \n\t"\
            "psraw      $2, %%xmm0         \n\t"\
            "paddw  %%xmm2, %%xmm0         \n\t"\
            "psraw      $6, %%xmm0         \n\t"\
            "packuswb %%xmm0, %%xmm0       \n\t"\
            OP(%%xmm0, (%1), %%xmm7, q)\
            "add    $48, %0                \n\t"\
            "add    %3, %1                 \n\t"\
            "decl   %2                     \n\t"\
            " jnz   1b                     \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((long)dstStride)\
            : "memory"\
        );\
    }\
}
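
/* Full 2-D half-pel (the mc22 "center" position): vertical pass into the
 * 16-bit scratch buffer, then the horizontal pass from there to dst.
 * put_h264_qpel8or16_hv1_lowpass_sse2 is shared by the sse2 and ssse3
 * instantiations. */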
#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}
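
/* Helpers that operate on 8-pixel rows gain nothing from the wider XMM
 * registers, so the SSE2/SSSE3 builds reuse the MMX2 versions under the
 * names the templates below expect; likewise hv2 needs SSSE3's palignr,
 * so plain SSE2 falls back to the MMX2 second pass. */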
#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2

#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2

#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2

#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2

#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2
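
/* mcXY naming: X and Y are the horizontal and vertical quarter-pel offsets
 * of the motion vector. mc00 is the full-pel copy, mc20/mc02 the pure
 * half-pel filters, mc22 the 2-D center; the remaining positions average
 * two neighbouring full/half-pel planes (the _l2 and _l2_shift5 helpers). */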
#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)

static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2

#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}

#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}

#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}
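
/* Positions needing both a horizontal and a vertical contribution. mc21,
 * mc23, mc12 and mc32 carve one aligned temp block into the 8-bit halfHV
 * plane and the 16-bit vertical intermediate halfV, hence the combined
 * buffer size and the alignment assert. */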
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint16_t, temp[SIZE*(SIZE<8?12:24)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((long)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((long)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((long)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((long)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}
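
/* Instantiate the complete mc00..mc33 sets: 4/8/16-pixel blocks for the
 * MMX-register implementations, 8/16 for the XMM ones. */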
#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)

#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define PAVGB "pavgusb"
QPEL_H264(put_,       PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_,       PUT_OP, mmx2)
QPEL_H264(avg_,  AVG_MMX2_OP, mmx2)
QPEL_H264_V_XMM(put_,       PUT_OP, sse2)
QPEL_H264_V_XMM(avg_,  AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_,       PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_,  AVG_MMX2_OP, sse2)
#ifdef HAVE_SSSE3
QPEL_H264_H_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_,  AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_,  AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_,       PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_,  AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#ifdef HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif
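
/* Chroma motion compensation. H.264 chroma uses a 1/8-pel bilinear filter
 * with weights (8-x)(8-y), x(8-y), (8-x)y and xy; the shared template is
 * included once per variant, with H264_CHROMA_OP/OP4 supplying the final
 * store or average and the extra rnd argument (wrapped by _rnd/_nornd)
 * selecting the rounding constant. */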
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"

static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 1);
}
static void put_h264_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 0);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"

static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_mmx2(dst, src, stride, h, x, y, 1);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"

static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_3dnow(dst, src, stride, h, x, y, 1);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

/***********************************/
/* weighted prediction */
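/* dst = clip(((dst*weight + round) >> log2_denom) + offset), four pixels
 * at a time. Since offset<<log2_denom is a multiple of the divisor, the
 * rounding constant and the post-shift offset fold into one pre-shift
 * addend, which is what the two offset fixups below compute:
 *     (pix*w + (offset << d) + ((1 << d) >> 1)) >> d
 *  == ((pix*w + ((1 << d) >> 1)) >> d) + offset */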
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    asm volatile(
        "movd    %0, %%mm4        \n\t"
        "movd    %1, %%mm5        \n\t"
        "movd    %2, %%mm6        \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm4, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm1 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "psraw     %%mm6, %%mm1 \n\t"
                "packuswb  %%mm7, %%mm0 \n\t"
                "packuswb  %%mm7, %%mm1 \n\t"
                "movd      %%mm0, %0    \n\t"
                "movd      %%mm1, %1    \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}
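
/* Bidirectional variant: dst = clip((dst*weightd + src*weights + offset')
 * >> (log2_denom+1)), with the shift count kept in mm6. Forcing the
 * pre-shifted offset odd via ((offset+1)|1) bakes a round-to-nearest term
 * into the single shift. */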
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    asm volatile(
        "movd    %0, %%mm3        \n\t"
        "movd    %1, %%mm4        \n\t"
        "movd    %2, %%mm5        \n\t"
        "movd    %3, %%mm6        \n\t"
        "pshufw  $0, %%mm3, %%mm3 \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm3, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm1, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}
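
/* Per-size entry points used to fill the H.264 weight/biweight function
 * tables. */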
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)