/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"

DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;

/***********************************/
/* IDCT */
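
/* Sum/difference butterflies used by the transforms below. SUMSUB_BADC does
 * two in-place butterflies without a temp register: a += b; b = b - old_a
 * (and likewise for c,d). SUMSUBD2_AB is the half-scaled odd-part rotation:
 * b' = a + (b>>1), a' = (a>>1) - old_b. */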
#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq  "#b", "#t" \n\t"\
    "psraw  $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw  $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"
static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    __asm__ volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )
        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )
        "paddw %%mm6, %%mm3 \n\t"
        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )
        "pxor %%mm7, %%mm7 \n\t"
    :: "m"(ff_pw_32));

    __asm__ volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((x86_reg)stride)
    );
}
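
/* One vertical pass of the 8x8 IDCT over four columns (the block is laid out
 * as 8 rows of 8 int16s, so byte offset 16*n addresses row n). The odd half
 * (rows 1,3,5,7) is computed first, then the even half (rows 0,2,4,6); the
 * final SUMSUB_BA chain leaves the eight output rows in mm0-mm7 for the
 * caller to transpose or scale. */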
static inline void h264_idct8_1d(int16_t *block)
{
    __asm__ volatile(
        "movq 112(%0), %%mm7 \n\t"
        "movq 80(%0), %%mm0 \n\t"
        "movq 48(%0), %%mm3 \n\t"
        "movq 16(%0), %%mm5 \n\t"
        "movq %%mm0, %%mm4 \n\t"
        "movq %%mm5, %%mm1 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm1 \n\t"
        "paddw %%mm0, %%mm4 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psubw %%mm5, %%mm4 \n\t"
        "paddw %%mm3, %%mm1 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psraw $1, %%mm3 \n\t"
        "psraw $1, %%mm7 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "movq %%mm4, %%mm3 \n\t"
        "movq %%mm1, %%mm7 \n\t"
        "psraw $2, %%mm1 \n\t"
        "psraw $2, %%mm3 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "psraw $2, %%mm5 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psraw $2, %%mm0 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"
        "movq 32(%0), %%mm2 \n\t"
        "movq 96(%0), %%mm6 \n\t"
        "movq %%mm2, %%mm4 \n\t"
        "movq %%mm6, %%mm0 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"
        "movq (%0), %%mm2 \n\t"
        "movq 64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}
static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        __asm__ volatile(
            "movq %%mm7, %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0, 8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7, (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        __asm__ volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"
            "movq %%mm7, (%0) \n\t"
            "movq %%mm5, 16(%0) \n\t"
            "movq %%mm3, 32(%0) \n\t"
            "movq %%mm1, 48(%0) \n\t"
            "movq %%mm0, 64(%0) \n\t"
            "movq %%mm2, 80(%0) \n\t"
            "movq %%mm4, 96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}
#define STORE_DIFF_8P( p, d, t, z )\
    "movq "#d", "#t" \n"\
    "psraw $6, "#p" \n"\
    "punpcklbw "#z", "#t" \n"\
    "paddsw "#t", "#p" \n"\
    "packuswb "#p", "#p" \n"\
    "movq "#p", "#d" \n"

#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
    "movdqa "#c", "#a" \n"\
    "movdqa "#g", "#e" \n"\
    "psraw $1, "#c" \n"\
    "psraw $1, "#g" \n"\
    "psubw "#e", "#c" \n"\
    "paddw "#a", "#g" \n"\
    "movdqa "#b", "#e" \n"\
    "psraw $1, "#e" \n"\
    "paddw "#b", "#e" \n"\
    "paddw "#d", "#e" \n"\
    "paddw "#f", "#e" \n"\
    "movdqa "#f", "#a" \n"\
    "psraw $1, "#a" \n"\
    "paddw "#f", "#a" \n"\
    "paddw "#h", "#a" \n"\
    "psubw "#b", "#a" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#d", "#f" \n"\
    "paddw "#h", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "psraw $1, "#d" \n"\
    "psraw $1, "#h" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "movdqa "#e", "#d" \n"\
    "movdqa "#a", "#h" \n"\
    "psraw $2, "#d" \n"\
    "psraw $2, "#h" \n"\
    "paddw "#f", "#d" \n"\
    "paddw "#b", "#h" \n"\
    "psraw $2, "#f" \n"\
    "psraw $2, "#b" \n"\
    "psubw "#f", "#e" \n"\
    "psubw "#a", "#b" \n"\
    "movdqa 0x00(%1), "#a" \n"\
    "movdqa 0x40(%1), "#f" \n"\
    SUMSUB_BA(f, a)\
    SUMSUB_BA(g, f)\
    SUMSUB_BA(c, a)\
    SUMSUB_BA(e, g)\
    SUMSUB_BA(b, c)\
    SUMSUB_BA(h, a)\
    SUMSUB_BA(d, f)
static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw %4, %%xmm4 \n"
        "movdqa %%xmm4, 0x00(%1) \n"
        "movdqa %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa %%xmm6, 0x60(%1) \n"
        "movdqa %%xmm7, 0x70(%1) \n"
        "pxor %%xmm7, %%xmm7 \n"
        STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32)
    );
}
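
/* DC-only blocks: dc may be negative, but MMX only has unsigned saturating
 * byte arithmetic, so the value is split into a positive part (mm0 = dc
 * clamped to [0,255]) and a negative part (mm1 = -dc clamped likewise);
 * each pixel then becomes pix +sat mm0 -sat mm1. */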
static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}
static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        __asm__ volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}

/***********************************/
/* deblocking */
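
/* The in-loop filter below follows the H.264 decision structure: an edge is
 * filtered only where |p0-q0| < alpha, |p1-p0| < beta and |q1-q0| < beta
 * (H264_DEBLOCK_MASK builds this mask with saturating compares, hence the
 * alpha-1/beta-1 operands), and the p0/q0 correction is clipped to +/-tc0,
 * extended by one where p2/q2 are close enough to be filtered too. */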
// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "psubusb "#a", "#t" \n\t"\
    "psubusb "#a", "#o" \n\t"\
    "pcmpeqb "#t", "#o" \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq %%mm1 , %%mm5 \n\t"\
    "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
    "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
    "pcmpeqb %%mm4 , %%mm4 \n\t"\
    "pxor %%mm4 , %%mm3 \n\t"\
    "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
    "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
    "pxor %%mm1 , %%mm4 \n\t"\
    "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
    "pavgb %%mm5 , %%mm3 \n\t"\
    "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
    "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
    "psubusb %%mm3 , %%mm6 \n\t"\
    "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
    "pminub %%mm7 , %%mm6 \n\t"\
    "pminub %%mm7 , %%mm3 \n\t"\
    "psubusb %%mm6 , %%mm1 \n\t"\
    "psubusb %%mm3 , %%mm2 \n\t"\
    "paddusb %%mm3 , %%mm1 \n\t"\
    "paddusb %%mm6 , %%mm2 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED_8(uint64_t, tmp0[2]);

    __asm__ volatile(
        "movq (%1,%3), %%mm0 \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2 \n\t" //q0
        "movq (%2,%3), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)

        "movd %5, %%mm4 \n\t"
        "punpcklbw %%mm4, %%mm4 \n\t"
        "punpcklwd %%mm4, %%mm4 \n\t"
        "pcmpeqb %%mm3, %%mm3 \n\t"
        "movq %%mm4, %%mm6 \n\t"
        "pcmpgtb %%mm3, %%mm4 \n\t"
        "movq %%mm6, 8+%0 \n\t"
        "pand %%mm4, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"

        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pand 8+%0, %%mm7 \n\t" // mask & tc0
        "movq %%mm7, %%mm4 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand %0, %%mm6 \n\t"
  451. "movq 8+%0, %%mm5 \n\t" // can be merged with the and below but is slower then
  452. "pand %%mm6, %%mm5 \n\t"
  453. "psubb %%mm6, %%mm7 \n\t"
  454. "movq (%2,%3), %%mm3 \n\t"
  455. H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)
  456. /* filter p0, q0 */
  457. H264_DEBLOCK_P0_Q0(%8, unused)
  458. "movq %%mm1, (%1,%3,2) \n\t"
  459. "movq %%mm2, (%2) \n\t"
  460. : "=m"(*tmp0)
  461. : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
  462. "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
  463. "m"(ff_bone)
  464. );
  465. }
static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}

static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans, pix-4, 8, stride);
        transpose4x4(trans+4*8, pix, 8, stride);
        transpose4x4(trans+4, pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix+4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2, trans+2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}
static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "m"(alpha1), "m"(beta1), "m"(ff_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
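
/* Boundary strengths: for each 4-pixel edge segment, bS = 2 if either side
 * has nonzero coefficients (nnz), else 1 if the two sides use different
 * reference frames or their MVs differ by 4 or more (quarter-pel units),
 * else 0. Both edge directions run through the same loop; the dir==0
 * results are transposed into place at the end. */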
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    int dir;
    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movq %0, %%mm6 \n\t"
        "movq %1, %%mm5 \n\t"
        "movq %2, %%mm4 \n\t"
        ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm5 \n\t"
            "movq %1, %%mm4 \n\t"
            ::"m"(ff_pb_3_1), "m"(ff_pb_7_3)
        );

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const int d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge, l;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            __asm__ volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                __asm__ volatile("pxor %%mm0, %%mm0 \n\t":);
                for( l = bidir; l >= 0; l-- ) {
                    __asm__ volatile(
                        "movd %0, %%mm1 \n\t"
                        "punpckldq %1, %%mm1 \n\t"
                        "movq %%mm1, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        "pand %%mm6, %%mm2 \n\t"
                        "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
                        "punpckldq %%mm1, %%mm2 \n\t"
                        "pcmpeqb %%mm2, %%mm1 \n\t"
                        "paddb %%mm6, %%mm1 \n\t"
                        "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
                        "por %%mm1, %%mm0 \n\t"
                        "movq %2, %%mm1 \n\t"
                        "movq %3, %%mm2 \n\t"
                        "psubw %4, %%mm1 \n\t"
                        "psubw %5, %%mm2 \n\t"
                        "packsswb %%mm2, %%mm1 \n\t"
                        "paddb %%mm5, %%mm1 \n\t"
                        "pminub %%mm4, %%mm1 \n\t"
                        "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
                        "por %%mm1, %%mm0 \n\t"
                        ::"m"(ref[l][b_idx]),
                          "m"(ref[l][b_idx+d_idx]),
                          "m"(mv[l][b_idx][0]),
                          "m"(mv[l][b_idx+2][0]),
                          "m"(mv[l][b_idx+d_idx][0]),
                          "m"(mv[l][b_idx+d_idx+2][0])
                    );
                }
            }
            __asm__ volatile(
                "movd %0, %%mm1 \n\t"
                "por %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            __asm__ volatile(
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "psrlw $15, %%mm0 \n\t" // nonzero -> 1
                "psrlw $14, %%mm1 \n\t"
                "movq %%mm0, %%mm2 \n\t"
                "por %%mm1, %%mm2 \n\t"
                "psrlw $1, %%mm1 \n\t"
                "pandn %%mm2, %%mm1 \n\t"
                "movq %%mm1, %0 \n\t"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0, (%0) \n\t"
        "movq %%mm3, 8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

/***********************************/
/* motion compensation */
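
/* Quarter-pel interpolation. Half-pel samples come from the 6-tap filter
 *     b = (E - 5*F + 20*G + 20*H - 5*I + J + 16) >> 5
 * which the macros below factor as (((G+H)<<2) - (F+I))*5 + (E+J) before
 * rounding, shifting and clamping to 8 bits. */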
#define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %4, "#T" \n\t"\
    "paddw %5, "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", "#T" \n\t"\
    "psraw $5, "#T" \n\t"\
    "packuswb "#T", "#T" \n\t"\
    OP(T, (%1), A, d)\
    "add %3, %1 \n\t"

#define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "paddw %4, "#A" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %3, "#T" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#A", "#T" \n\t"\
    "mov"#q" "#T", "#OF"(%1) \n\t"

#define QPEL_H264V(A,B,C,D,E,F,OP)      QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
#define QPEL_H264HV(A,B,C,D,E,F,OF)     QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
#define QPEL_H264V_XMM(A,B,C,D,E,F,OP)  QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
#define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)
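
/* QPEL_H264 expands to the whole family of lowpass helpers for one
 * instruction set; OPNAME and OP select the store operation (put vs avg)
 * when the macro is instantiated. */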
#define QPEL_H264(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "1: \n\t"\
        "movd -1(%0), %%mm1 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "movd 1(%0), %%mm3 \n\t"\
        "movd 2(%0), %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "paddw %%mm0, %%mm1 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "movd -2(%0), %%mm0 \n\t"\
        "movd 3(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm3, %%mm0 \n\t"\
        "psllw $2, %%mm2 \n\t"\
        "psubw %%mm1, %%mm2 \n\t"\
        "pmullw %%mm4, %%mm2 \n\t"\
        "paddw %%mm5, %%mm0 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=4;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm4 \n\t"\
        "movq %1, %%mm5 \n\t"\
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
    );\
    do{\
        __asm__ volatile(\
            "movd -1(%0), %%mm1 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "movd 1(%0), %%mm3 \n\t"\
            "movd 2(%0), %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "paddw %%mm0, %%mm1 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "movd -2(%0), %%mm0 \n\t"\
            "movd 3(%0), %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm3, %%mm0 \n\t"\
            "psllw $2, %%mm2 \n\t"\
            "psubw %%mm1, %%mm2 \n\t"\
            "pmullw %%mm4, %%mm2 \n\t"\
            "paddw %%mm5, %%mm0 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "movd (%2), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "packuswb %%mm0, %%mm0 \n\t"\
            PAVGB" %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm6, d)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }while(--h);\
}\
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
            \
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    __asm__ volatile(\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "paddw 10(%0), %%mm0 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "paddw 8(%0), %%mm1 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
        "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0 \n\t"\
        "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\
        "psraw $6, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+g"(h)\
        : "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        "movq -1(%0), %%mm2 \n\t"\
        "movq 2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        "movd -2(%0), %%mm2 \n\t"\
        "movd 7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 1(%0), %%mm2 \n\t"\
            "movq %%mm0, %%mm1 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpckhbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "psllw $2, %%mm0 \n\t"\
            "psllw $2, %%mm1 \n\t"\
            "movq -1(%0), %%mm2 \n\t"\
            "movq 2(%0), %%mm4 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "movq %%mm4, %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            "punpckhbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm4, %%mm2 \n\t"\
            "paddw %%mm3, %%mm5 \n\t"\
            "psubw %%mm2, %%mm0 \n\t"\
            "psubw %%mm5, %%mm1 \n\t"\
            "pmullw %%mm6, %%mm0 \n\t"\
            "pmullw %%mm6, %%mm1 \n\t"\
            "movd -2(%0), %%mm2 \n\t"\
            "movd 7(%0), %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "movq %5, %%mm5 \n\t"\
            "paddw %%mm5, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm4, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "movq (%2), %%mm4 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            PAVGB" %%mm4, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm5, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(h==16){\
            __asm__ volatile(\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                \
                : "+a"(src), "+c"(dst)\
                : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(size==16){\
            __asm__ volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int w = size>>4;\
    do{\
        int h = size;\
        __asm__ volatile(\
            "1: \n\t"\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm3 \n\t"\
            "movq 2(%0), %%mm1 \n\t"\
            "movq 10(%0), %%mm4 \n\t"\
            "paddw %%mm4, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "paddw 18(%0), %%mm3 \n\t"\
            "paddw 16(%0), %%mm4 \n\t"\
            "movq 4(%0), %%mm2 \n\t"\
            "movq 12(%0), %%mm5 \n\t"\
            "paddw 6(%0), %%mm2 \n\t"\
            "paddw 14(%0), %%mm5 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "paddsw %%mm2, %%mm0 \n\t"\
            "paddsw %%mm5, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm5, %%mm3 \n\t"\
            "psraw $6, %%mm0 \n\t"\
            "psraw $6, %%mm3 \n\t"\
            "packuswb %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
        tmp += 8 - size*24;\
        dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    __asm__ volatile(\
        "movq (%1), %%mm0 \n\t"\
        "movq 24(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea (%0,%3,2), %0 \n\t"\
        "lea (%2,%4,2), %2 \n\t"\
        "movq 48(%1), %%mm0 \n\t"\
        "movq 72(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
        :"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    do{\
        __asm__ volatile(\
            "movq (%1), %%mm0 \n\t"\
            "movq 8(%1), %%mm1 \n\t"\
            "movq 48(%1), %%mm2 \n\t"\
            "movq 8+48(%1), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "psraw $5, %%mm2 \n\t"\
            "psraw $5, %%mm3 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            "packuswb %%mm3, %%mm2 \n\t"\
            PAVGB" (%0), %%mm0 \n\t"\
            PAVGB" (%0,%3), %%mm2 \n\t"\
            OP(%%mm0, (%2), %%mm5, q)\
            OP(%%mm2, (%2,%4), %%mm5, q)\
            ::"a"(src8), "c"(src16), "d"(dst),\
              "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
            :"memory");\
        src8 += 2L*src8Stride;\
        src16 += 48;\
        dst += 2L*dstStride;\
    }while(h-=2);\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\
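
/* On x86-64 the extra XMM registers (xmm8-xmm15) allow a single-pass
 * 16-wide horizontal filter; the 32-bit variant below simply runs the
 * 8-wide version on both halves. */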
#ifdef ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=16;\
    __asm__ volatile(\
        "pxor %%xmm15, %%xmm15 \n\t"\
        "movdqa %6, %%xmm14 \n\t"\
        "movdqa %7, %%xmm13 \n\t"\
        "1: \n\t"\
        "lddqu 3(%0), %%xmm1 \n\t"\
        "lddqu -5(%0), %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm15, %%xmm1 \n\t"\
        "punpcklbw %%xmm15, %%xmm0 \n\t"\
        "punpcklbw %%xmm15, %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm0, %%xmm6 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm0, %%xmm8 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm0, %%xmm9 \n\t"\
        "movdqa %%xmm1, %%xmm5 \n\t"\
        "movdqa %%xmm0, %%xmm10 \n\t"\
        "palignr $6, %%xmm0, %%xmm5 \n\t"\
        "palignr $6, %%xmm7, %%xmm10 \n\t"\
        "palignr $8, %%xmm0, %%xmm4 \n\t"\
        "palignr $8, %%xmm7, %%xmm9 \n\t"\
        "palignr $10,%%xmm0, %%xmm3 \n\t"\
        "palignr $10,%%xmm7, %%xmm8 \n\t"\
        "paddw %%xmm1, %%xmm5 \n\t"\
        "paddw %%xmm0, %%xmm10 \n\t"\
        "palignr $12,%%xmm0, %%xmm2 \n\t"\
        "palignr $12,%%xmm7, %%xmm6 \n\t"\
        "palignr $14,%%xmm0, %%xmm1 \n\t"\
        "palignr $14,%%xmm7, %%xmm0 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm8, %%xmm6 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "paddw %%xmm9, %%xmm0 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psllw $2, %%xmm6 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "psubw %%xmm0, %%xmm6 \n\t"\
        "paddw %%xmm13,%%xmm5 \n\t"\
        "paddw %%xmm13,%%xmm10 \n\t"\
        "pmullw %%xmm14,%%xmm2 \n\t"\
        "pmullw %%xmm14,%%xmm6 \n\t"\
        "lddqu (%2), %%xmm3 \n\t"\
        "paddw %%xmm5, %%xmm2 \n\t"\
        "paddw %%xmm10,%%xmm6 \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "psraw $5, %%xmm6 \n\t"\
        "packuswb %%xmm2,%%xmm6 \n\t"\
        "pavgb %%xmm3, %%xmm6 \n\t"\
        OP(%%xmm6, (%1), %%xmm4, dqa)\
        "add %5, %0 \n\t"\
        "add %5, %1 \n\t"\
        "add %4, %2 \n\t"\
        "decl %3 \n\t"\
        "jg 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
        : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64

#define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movdqa %0, %%xmm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        __asm__ volatile(\
            "lddqu -5(%0), %%xmm1 \n\t"\
            "movdqa %%xmm1, %%xmm0 \n\t"\
            "punpckhbw %%xmm7, %%xmm1 \n\t"\
            "punpcklbw %%xmm7, %%xmm0 \n\t"\
            "movdqa %%xmm1, %%xmm2 \n\t"\
            "movdqa %%xmm1, %%xmm3 \n\t"\
            "movdqa %%xmm1, %%xmm4 \n\t"\
            "movdqa %%xmm1, %%xmm5 \n\t"\
            "palignr $6, %%xmm0, %%xmm5 \n\t"\
            "palignr $8, %%xmm0, %%xmm4 \n\t"\
            "palignr $10,%%xmm0, %%xmm3 \n\t"\
            "paddw %%xmm1, %%xmm5 \n\t"\
            "palignr $12,%%xmm0, %%xmm2 \n\t"\
            "palignr $14,%%xmm0, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "psllw $2, %%xmm2 \n\t"\
            "movq (%2), %%xmm3 \n\t"\
            "psubw %%xmm1, %%xmm2 \n\t"\
            "paddw %5, %%xmm5 \n\t"\
            "pmullw %%xmm6, %%xmm2 \n\t"\
            "paddw %%xmm5, %%xmm2 \n\t"\
            "psraw $5, %%xmm2 \n\t"\
            "packuswb %%xmm2, %%xmm2 \n\t"\
            "pavgb %%xmm3, %%xmm2 \n\t"\
            OP(%%xmm2, (%1), %%xmm4, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movdqa %5, %%xmm6 \n\t"\
        "1: \n\t"\
        "lddqu -5(%0), %%xmm1 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm7, %%xmm1 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm1, %%xmm5 \n\t"\
        "palignr $6, %%xmm0, %%xmm5 \n\t"\
        "palignr $8, %%xmm0, %%xmm4 \n\t"\
        "palignr $10,%%xmm0, %%xmm3 \n\t"\
        "paddw %%xmm1, %%xmm5 \n\t"\
        "palignr $12,%%xmm0, %%xmm2 \n\t"\
        "palignr $14,%%xmm0, %%xmm1 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "paddw %6, %%xmm5 \n\t"\
        "pmullw %%xmm6, %%xmm2 \n\t"\
        "paddw %%xmm5, %%xmm2 \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "packuswb %%xmm2, %%xmm2 \n\t"\
        OP(%%xmm2, (%1), %%xmm4, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

#define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    src -= 2*srcStride;\
    \
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movq (%0), %%xmm0 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm1 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm2 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm3 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "punpcklbw %%xmm7, %%xmm1 \n\t"\
        "punpcklbw %%xmm7, %%xmm2 \n\t"\
        "punpcklbw %%xmm7, %%xmm3 \n\t"\
        "punpcklbw %%xmm7, %%xmm4 \n\t"\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
        QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
        QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
        QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
    if(h==16){\
        __asm__ volatile(\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
            QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
            QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
            QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
    }\
}\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}
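/* The vertical lowpass keeps a six-row sliding window of unpacked
 * source rows in xmm registers; each QPEL_H264V_XMM step rotates the
 * register roles, loading one new row and emitting one output row, so
 * no data has to be reshuffled between registers. */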
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
    int w = (size+8)>>3;
    src -= 2*srcStride+2;
    while(w--){
        __asm__ volatile(
            "pxor %%xmm7, %%xmm7 \n\t"
            "movq (%0), %%xmm0 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm1 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm2 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm3 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm4 \n\t"
            "add %2, %0 \n\t"
            "punpcklbw %%xmm7, %%xmm0 \n\t"
            "punpcklbw %%xmm7, %%xmm1 \n\t"
            "punpcklbw %%xmm7, %%xmm2 \n\t"
            "punpcklbw %%xmm7, %%xmm3 \n\t"
            "punpcklbw %%xmm7, %%xmm4 \n\t"
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48)
            QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48)
            QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48)
            QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48)
            QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48)
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48)
            : "+a"(src)
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
            : "memory"
        );
        if(size==16){
            __asm__ volatile(
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48)
                QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
                QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
                QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
                QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
                : "+a"(src)
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
                : "memory"
            );
        }
        tmp += 8;
        src += 8 - (size+5)*srcStride;
    }
}
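/* hv1 stores 16-bit vertical-filter intermediates into tmp in 8-column
 * strips with a row stride of 48 bytes (24 int16_t). The horizontal
 * second pass reads size+5 columns per row, which is why w = (size+8)>>3
 * strips are produced rather than size>>3. */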
#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int h = size;\
    if(size == 16){\
        __asm__ volatile(\
            "1: \n\t"\
            "movdqa 32(%0), %%xmm4 \n\t"\
            "movdqa 16(%0), %%xmm5 \n\t"\
            "movdqa (%0), %%xmm7 \n\t"\
            "movdqa %%xmm4, %%xmm3 \n\t"\
            "movdqa %%xmm4, %%xmm2 \n\t"\
            "movdqa %%xmm4, %%xmm1 \n\t"\
            "movdqa %%xmm4, %%xmm0 \n\t"\
            "palignr $10, %%xmm5, %%xmm0 \n\t"\
            "palignr $8, %%xmm5, %%xmm1 \n\t"\
            "palignr $6, %%xmm5, %%xmm2 \n\t"\
            "palignr $4, %%xmm5, %%xmm3 \n\t"\
            "palignr $2, %%xmm5, %%xmm4 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "movdqa %%xmm5, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm3 \n\t"\
            "palignr $8, %%xmm7, %%xmm4 \n\t"\
            "palignr $2, %%xmm7, %%xmm6 \n\t"\
            "palignr $10, %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "palignr $6, %%xmm7, %%xmm5 \n\t"\
            "palignr $4, %%xmm7, %%xmm6 \n\t"\
            "paddw %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm5 \n\t"\
            \
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "psraw $6, %%xmm3 \n\t"\
            "packuswb %%xmm0, %%xmm3 \n\t"\
            OP(%%xmm3, (%1), %%xmm7, dqa)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }else{\
        __asm__ volatile(\
            "1: \n\t"\
            "movdqa 16(%0), %%xmm1 \n\t"\
            "movdqa (%0), %%xmm0 \n\t"\
            "movdqa %%xmm1, %%xmm2 \n\t"\
            "movdqa %%xmm1, %%xmm3 \n\t"\
            "movdqa %%xmm1, %%xmm4 \n\t"\
            "movdqa %%xmm1, %%xmm5 \n\t"\
            "palignr $10, %%xmm0, %%xmm5 \n\t"\
            "palignr $8, %%xmm0, %%xmm4 \n\t"\
            "palignr $6, %%xmm0, %%xmm3 \n\t"\
            "palignr $4, %%xmm0, %%xmm2 \n\t"\
            "palignr $2, %%xmm0, %%xmm1 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "packuswb %%xmm0, %%xmm0 \n\t"\
            OP(%%xmm0, (%1), %%xmm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }\
}
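/* The second (horizontal) pass avoids multiplies entirely: with
 * x = a+f, y = b+e, z = c+d it exploits
 *     x - 5*y + 20*z  ~=  16 * (((((x - y) >> 2) - y + z) >> 2) + z)
 * (equality up to the truncation of the arithmetic shifts), then shifts
 * right by 6 for a total of >>10 across both filter passes. The +512
 * rounding term appears to be folded in during the first pass via the
 * ff_pw_16 bias, since the filter taps sum to 32 and 16*32 == 512.
 * A scalar sketch of one output pixel (hypothetical helper, not part
 * of the build):
 */
#if 0
static uint8_t hv_second_pass_pixel_sketch(const int16_t *mid)
{
    /* mid points at the centre vertical intermediate of a row in tmp */
    return av_clip_uint8((mid[-2] + mid[3]
                          - 5 * (mid[-1] + mid[2])
                          + 20 * (mid[0] + mid[1]) + 512) >> 10);
}
#endif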
#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\

#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2
#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2
#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2
#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2
#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2
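/* The aliases above fill the SSE2/SSSE3 function tables with the MMX2
 * (or plain SSE2) implementations, presumably because these helpers
 * were never ported to wider registers and gain little from them. */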
#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\

static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2
#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\

#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\

#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\

#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint16_t, temp[SIZE*(SIZE<8?12:24)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

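/* mcXY naming: X is the horizontal and Y the vertical offset in
 * quarter samples. mc00 is the full-pel copy, mc20/mc02 the pure
 * half-pel filters and mc22 the centre position; the remaining
 * quarter-pel positions are formed by averaging a half-pel result with
 * a neighbouring full-pel or half-pel signal via the *_l2 helpers,
 * while mc12/mc32 average against the raw 16-bit intermediates (hence
 * the extra >>5 in the *_l2_shift5 helpers). */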
#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\

#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define PAVGB "pavgusb"
QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
#ifdef HAVE_SSSE3
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#ifdef HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"
static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 1);
}
static void put_h264_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 0);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0
#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_mmx2(dst, src, stride, h, x, y, 1);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_3dnow(dst, src, stride, h, x, y, 1);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#ifdef HAVE_SSSE3
#define AVG_OP(X)
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_ssse3.c"
static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void put_h264_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}
#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#define AVG_OP(X) X
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_ssse3.c"
static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#endif
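/* The chroma templates implement H.264's bilinear chroma
 * interpolation: for fractional offsets x,y in 0..7 and the four
 * neighbouring samples A,B,C,D,
 *     dst = (A*(8-x)*(8-y) + B*x*(8-y) + C*(8-x)*y + D*x*y + 32) >> 6.
 * The trailing flag of the template functions (1 in the _rnd wrappers,
 * 0 in _nornd) selects between two rounding variants. A plain-C sketch
 * of the rounded case (hypothetical helper, not part of the build):
 */
#if 0
static void h264_chroma_mc_c_sketch(uint8_t *dst, uint8_t *src,
                                    int stride, int w, int h, int x, int y)
{
    int i, j;
    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++)
            dst[i] = ((8-x)*(8-y)*src[i]        + x*(8-y)*src[i+1] +
                      (8-x)*  y  *src[i+stride] + x*  y  *src[i+stride+1] +
                      32) >> 6;
        dst += stride;
        src += stride;
    }
}
#endif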
/***********************************/
/* weighted prediction */

static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    __asm__ volatile(
        "movd %0, %%mm4 \n\t"
        "movd %1, %%mm5 \n\t"
        "movd %2, %%mm6 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm4, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "paddsw %%mm5, %%mm1 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "psraw %%mm6, %%mm1 \n\t"
                "packuswb %%mm7, %%mm0 \n\t"
                "packuswb %%mm7, %%mm1 \n\t"
                "movd %%mm0, %0 \n\t"
                "movd %%mm1, %1 \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}
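/* Explicit unidirectional weighting. The spec formula
 *     dst = clip(((dst*w + (1 << (ld-1))) >> ld) + o)
 * is rearranged above by pre-folding the offset and rounding term
 * (offset = (o << ld) + (1 << ld)/2), leaving one multiply, one add
 * and one shift per sample. Per-pixel sketch, ignoring that paddsw
 * saturates extreme intermediates (illustration only):
 */
#if 0
static uint8_t weight_pixel_sketch(uint8_t p, int weight, int folded_offset,
                                   int log2_denom)
{
    return av_clip_uint8((p * weight + folded_offset) >> log2_denom);
}
#endif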
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    __asm__ volatile(
        "movd %0, %%mm3 \n\t"
        "movd %1, %%mm4 \n\t"
        "movd %2, %%mm5 \n\t"
        "movd %3, %%mm6 \n\t"
        "pshufw $0, %%mm3, %%mm3 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm3, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm1, %%mm0 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}
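/* Bidirectional weighting: both predictions are scaled, summed with a
 * pre-folded offset that is forced odd, and shifted by log2_denom+1.
 * Per-pixel sketch (illustration only):
 */
#if 0
static uint8_t biweight_pixel_sketch(uint8_t a, uint8_t b, int weightd,
                                     int weights, int offset, int log2_denom)
{
    int folded = ((offset + 1) | 1) << log2_denom; /* as computed above */
    return av_clip_uint8((a * weightd + b * weights + folded)
                         >> (log2_denom + 1));
}
#endif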
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)
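/* Instantiations go down to 4x2, which arises as the chroma block of
 * an 8x4 luma partition in 4:2:0 content. */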