/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"
#include "libavcodec/h264pred.h"

DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;

/***********************************/
/* IDCT */

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t"\
    "psraw $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"
static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        :: "r"(block) );

    __asm__ volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )
        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )
        "paddw %%mm6, %%mm3 \n\t"
        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )
        "pxor %%mm7, %%mm7 \n\t"
        :: "m"(ff_pw_32));

    __asm__ volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((x86_reg)stride)
    );
}
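/* One 1-D pass of the 8x8 transform over four columns of the block (offsets
 * are in bytes, 16 per row of eight int16 coefficients); the results stay in
 * mm0-mm7 for the caller to transpose or store. */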
static inline void h264_idct8_1d(int16_t *block)
{
    __asm__ volatile(
        "movq 112(%0), %%mm7 \n\t"
        "movq 80(%0), %%mm0 \n\t"
        "movq 48(%0), %%mm3 \n\t"
        "movq 16(%0), %%mm5 \n\t"
        "movq %%mm0, %%mm4 \n\t"
        "movq %%mm5, %%mm1 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm1 \n\t"
        "paddw %%mm0, %%mm4 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psubw %%mm5, %%mm4 \n\t"
        "paddw %%mm3, %%mm1 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psraw $1, %%mm3 \n\t"
        "psraw $1, %%mm7 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "movq %%mm4, %%mm3 \n\t"
        "movq %%mm1, %%mm7 \n\t"
        "psraw $2, %%mm1 \n\t"
        "psraw $2, %%mm3 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "psraw $2, %%mm5 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psraw $2, %%mm0 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"
        "movq 32(%0), %%mm2 \n\t"
        "movq 96(%0), %%mm6 \n\t"
        "movq %%mm2, %%mm4 \n\t"
        "movq %%mm6, %%mm0 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"
        "movq (%0), %%mm2 \n\t"
        "movq 64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}

static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED(8, uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        __asm__ volatile(
            "movq %%mm7, %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0, 8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7, (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        __asm__ volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"
            "movq %%mm7, (%0) \n\t"
            "movq %%mm5, 16(%0) \n\t"
            "movq %%mm3, 32(%0) \n\t"
            "movq %%mm1, 48(%0) \n\t"
            "movq %%mm0, 64(%0) \n\t"
            "movq %%mm2, 80(%0) \n\t"
            "movq %%mm4, 96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}

#define STORE_DIFF_8P( p, d, t, z )\
    "movq "#d", "#t" \n"\
    "psraw $6, "#p" \n"\
    "punpcklbw "#z", "#t" \n"\
    "paddsw "#t", "#p" \n"\
    "packuswb "#p", "#p" \n"\
    "movq "#p", "#d" \n"

#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
    "movdqa "#c", "#a" \n"\
    "movdqa "#g", "#e" \n"\
    "psraw $1, "#c" \n"\
    "psraw $1, "#g" \n"\
    "psubw "#e", "#c" \n"\
    "paddw "#a", "#g" \n"\
    "movdqa "#b", "#e" \n"\
    "psraw $1, "#e" \n"\
    "paddw "#b", "#e" \n"\
    "paddw "#d", "#e" \n"\
    "paddw "#f", "#e" \n"\
    "movdqa "#f", "#a" \n"\
    "psraw $1, "#a" \n"\
    "paddw "#f", "#a" \n"\
    "paddw "#h", "#a" \n"\
    "psubw "#b", "#a" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#d", "#f" \n"\
    "paddw "#h", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "psraw $1, "#d" \n"\
    "psraw $1, "#h" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "movdqa "#e", "#d" \n"\
    "movdqa "#a", "#h" \n"\
    "psraw $2, "#d" \n"\
    "psraw $2, "#h" \n"\
    "paddw "#f", "#d" \n"\
    "paddw "#b", "#h" \n"\
    "psraw $2, "#f" \n"\
    "psraw $2, "#b" \n"\
    "psubw "#f", "#e" \n"\
    "psubw "#a", "#b" \n"\
    "movdqa 0x00(%1), "#a" \n"\
    "movdqa 0x40(%1), "#f" \n"\
    SUMSUB_BA(f, a)\
    SUMSUB_BA(g, f)\
    SUMSUB_BA(c, a)\
    SUMSUB_BA(e, g)\
    SUMSUB_BA(b, c)\
    SUMSUB_BA(h, a)\
    SUMSUB_BA(d, f)
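/* SSE2 variant of the 8x8 transform: with eight coefficients per xmm register
 * a single H264_IDCT8_1D_SSE2 pass covers the whole block, so only one
 * TRANSPOSE8 is needed in between; a few rows are spilled to the block buffer
 * since only eight xmm registers are available on x86_32. */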
static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw %4, %%xmm4 \n"
        "movdqa %%xmm4, 0x00(%1) \n"
        "movdqa %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa %%xmm6, 0x60(%1) \n"
        "movdqa %%xmm7, 0x70(%1) \n"
        "pxor %%xmm7, %%xmm7 \n"
        STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32)
    );
}
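/* DC-only fast path: the single coefficient is rounded to a pixel delta,
 * (dc+32)>>6, and split into a nonnegative add value (mm0) and its negation
 * (mm1) so the signed delta can be applied with saturating paddusb/psubusb. */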
static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;

    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}

static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;

    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        __asm__ volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}
//FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
static const uint8_t scan8[16 + 2*4]={
    4+1*8, 5+1*8, 4+2*8, 5+2*8,
    6+1*8, 7+1*8, 6+2*8, 7+2*8,
    4+3*8, 5+3*8, 4+4*8, 5+4*8,
    6+3*8, 7+3*8, 6+4*8, 7+4*8,
    1+1*8, 2+1*8,
    1+2*8, 2+2*8,
    1+4*8, 2+4*8,
    1+5*8, 2+5*8,
};

static void ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        if(nnzc[ scan8[i] ])
            ff_h264_idct8_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])      ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        else if(block[i*16])      ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_sse2   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_mmx (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

#if CONFIG_GPL && HAVE_YASM
static void ff_h264_idct_dc_add8_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movd %0, %%mm0 \n\t" //  0 0 X D
        "punpcklwd %1, %%mm0 \n\t" //  x X d D
        "paddsw %2, %%mm0 \n\t"
        "psraw $6, %%mm0 \n\t"
        "punpcklwd %%mm0, %%mm0 \n\t" //  d d D D
        "pxor %%mm1, %%mm1 \n\t" //  0 0 0 0
        "psubw %%mm0, %%mm1 \n\t" // -d-d-D-D
        "packuswb %%mm1, %%mm0 \n\t" // -d-d-D-D d d D D
        "pshufw $0xFA, %%mm0, %%mm1 \n\t" // -d-d-d-d-D-D-D-D
        "punpcklwd %%mm0, %%mm0 \n\t" //  d d d d D D D D
        ::"m"(block[ 0]),
          "m"(block[16]),
          "m"(ff_pw_32)
    );
    __asm__ volatile(
        "movq %0, %%mm2 \n\t"
        "movq %1, %%mm3 \n\t"
        "movq %2, %%mm4 \n\t"
        "movq %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movq %%mm2, %0 \n\t"
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %3 \n\t"
        :"+m"(*(uint64_t*)(dst+0*stride)),
         "+m"(*(uint64_t*)(dst+1*stride)),
         "+m"(*(uint64_t*)(dst+2*stride)),
         "+m"(*(uint64_t*)(dst+3*stride))
    );
}

extern void ff_x264_add8x4_idct_sse2(uint8_t *dst, int16_t *block, int stride);

static void ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=2)
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
}

static void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=2){
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]|block[i*16+16])
            ff_h264_idct_dc_add8_mmx2(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i+=2){
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16]|block[i*16+16])
            ff_h264_idct_dc_add8_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}
#endif

/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "psubusb "#a", "#t" \n\t"\
    "psubusb "#a", "#o" \n\t"\
    "pcmpeqb "#t", "#o" \n\t"\

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq %%mm1 , %%mm5 \n\t"\
    "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
    "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
    "pcmpeqb %%mm4 , %%mm4 \n\t"\
    "pxor %%mm4 , %%mm3 \n\t"\
    "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
    "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
    "pxor %%mm1 , %%mm4 \n\t"\
    "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
    "pavgb %%mm5 , %%mm3 \n\t"\
    "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
    "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
    "psubusb %%mm3 , %%mm6 \n\t"\
    "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
    "pminub %%mm7 , %%mm6 \n\t"\
    "pminub %%mm7 , %%mm3 \n\t"\
    "psubusb %%mm6 , %%mm1 \n\t"\
    "psubusb %%mm3 , %%mm2 \n\t"\
    "paddusb %%mm3 , %%mm1 \n\t"\
    "paddusb %%mm6 , %%mm2 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %9=ff_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %9, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED(8, uint64_t, tmp0)[2];

    __asm__ volatile(
        "movq (%2,%4), %%mm0 \n\t" //p1
        "movq (%2,%4,2), %%mm1 \n\t" //p0
        "movq (%3), %%mm2 \n\t" //q0
        "movq (%3,%4), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%7, %8)
        "movd %6, %%mm4 \n\t"
        "punpcklbw %%mm4, %%mm4 \n\t"
        "punpcklwd %%mm4, %%mm4 \n\t"
        "pcmpeqb %%mm3, %%mm3 \n\t"
        "movq %%mm4, %%mm6 \n\t"
        "pcmpgtb %%mm3, %%mm4 \n\t"
        "movq %%mm6, %1 \n\t"
        "pand %%mm4, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"
        /* filter p1 */
        "movq (%2), %%mm3 \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pand %1, %%mm7 \n\t" // mask & tc0
        "movq %%mm7, %%mm4 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%2)", "(%2,%4)", %%mm6, %%mm4)
        /* filter q1 */
        "movq (%3,%4,2), %%mm4 \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand %0, %%mm6 \n\t"
  604. "movq %1, %%mm5 \n\t" // can be merged with the and below but is slower then
  605. "pand %%mm6, %%mm5 \n\t"
  606. "psubb %%mm6, %%mm7 \n\t"
  607. "movq (%3,%4), %%mm3 \n\t"
  608. H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%3,%4,2)", "(%3,%4)", %%mm5, %%mm6)
  609. /* filter p0, q0 */
  610. H264_DEBLOCK_P0_Q0(%9, unused)
  611. "movq %%mm1, (%2,%4,2) \n\t"
  612. "movq %%mm2, (%3) \n\t"
  613. : "=m"(tmp0[0]), "=m"(tmp0[1])
  614. : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
  615. "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
  616. "m"(ff_bone)
  617. );
  618. }
  619. static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
  620. {
  621. if((tc0[0] & tc0[1]) >= 0)
  622. h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
  623. if((tc0[2] & tc0[3]) >= 0)
  624. h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
  625. }
  626. static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
  627. {
  628. //FIXME: could cut some load/stores by merging transpose with filter
  629. // also, it only needs to transpose 6x8
  630. DECLARE_ALIGNED(8, uint8_t, trans)[8*8];
  631. int i;
  632. for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
  633. if((tc0[0] & tc0[1]) < 0)
  634. continue;
  635. transpose4x4(trans, pix-4, 8, stride);
  636. transpose4x4(trans +4*8, pix, 8, stride);
  637. transpose4x4(trans+4, pix-4+4*stride, 8, stride);
  638. transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
  639. h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
  640. transpose4x4(pix-2, trans +2*8, stride, 8);
  641. transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
  642. }
  643. }
  644. static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
  645. {
  646. __asm__ volatile(
  647. "movq (%0), %%mm0 \n\t" //p1
  648. "movq (%0,%2), %%mm1 \n\t" //p0
  649. "movq (%1), %%mm2 \n\t" //q0
  650. "movq (%1,%2), %%mm3 \n\t" //q1
  651. H264_DEBLOCK_MASK(%4, %5)
  652. "movd %3, %%mm6 \n\t"
  653. "punpcklbw %%mm6, %%mm6 \n\t"
  654. "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
  655. H264_DEBLOCK_P0_Q0(%6, %7)
  656. "movq %%mm1, (%0,%2) \n\t"
  657. "movq %%mm2, (%1) \n\t"
  658. :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
  659. "r"(*(uint32_t*)tc0),
  660. "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
  661. );
  662. }
  663. static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
  664. {
  665. h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
  666. }
  667. static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
  668. {
  669. //FIXME: could cut some load/stores by merging transpose with filter
  670. DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
  671. transpose4x4(trans, pix-2, 8, stride);
  672. transpose4x4(trans+4, pix-2+4*stride, 8, stride);
  673. h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
  674. transpose4x4(pix-2, trans, stride, 8);
  675. transpose4x4(pix-2+4*stride, trans+4, stride, 8);
  676. }
  677. // p0 = (p0 + q1 + 2*p1 + 2) >> 2
  678. #define H264_FILTER_CHROMA4(p0, p1, q1, one) \
  679. "movq "#p0", %%mm4 \n\t"\
  680. "pxor "#q1", %%mm4 \n\t"\
  681. "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
  682. "pavgb "#q1", "#p0" \n\t"\
  683. "psubusb %%mm4, "#p0" \n\t"\
  684. "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\
  685. static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
  686. {
  687. __asm__ volatile(
  688. "movq (%0), %%mm0 \n\t"
  689. "movq (%0,%2), %%mm1 \n\t"
  690. "movq (%1), %%mm2 \n\t"
  691. "movq (%1,%2), %%mm3 \n\t"
  692. H264_DEBLOCK_MASK(%3, %4)
  693. "movq %%mm1, %%mm5 \n\t"
  694. "movq %%mm2, %%mm6 \n\t"
  695. H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
  696. H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
  697. "psubb %%mm5, %%mm1 \n\t"
  698. "psubb %%mm6, %%mm2 \n\t"
  699. "pand %%mm7, %%mm1 \n\t"
  700. "pand %%mm7, %%mm2 \n\t"
  701. "paddb %%mm5, %%mm1 \n\t"
  702. "paddb %%mm6, %%mm2 \n\t"
  703. "movq %%mm1, (%0,%2) \n\t"
  704. "movq %%mm2, (%1) \n\t"
  705. :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
  706. "m"(alpha1), "m"(beta1), "m"(ff_bone)
  707. );
  708. }
  709. static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
  710. {
  711. h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
  712. }
  713. static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
  714. {
  715. //FIXME: could cut some load/stores by merging transpose with filter
  716. DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
  717. transpose4x4(trans, pix-2, 8, stride);
  718. transpose4x4(trans+4, pix-2+4*stride, 8, stride);
  719. h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
  720. transpose4x4(pix-2, trans, stride, 8);
  721. transpose4x4(pix-2+4*stride, trans+4, stride, 8);
  722. }
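/* Derives the boundary strengths for the edges of one macroblock from the
 * nnz/ref/mv caches: bS=2 where either neighboring block has coded
 * coefficients, else bS=1 where the references differ or a motion vector
 * component differs by 4 or more quarter-pel units (ff_pb_3_1 adjusts the
 * vertical limit for field coding), else 0. */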
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    int dir;
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
        :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const x86_reg d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            __asm__ volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                if(bidir) {
                    __asm__ volatile(
                        "movd (%1,%0), %%mm2 \n"
                        "punpckldq 40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
                        "pshufw $0x44, (%1), %%mm0 \n" // { ref0[b], ref0[b] }
                        "pshufw $0x44, 40(%1), %%mm1 \n" // { ref1[b], ref1[b] }
                        "pshufw $0x4E, %%mm2, %%mm3 \n"
                        "psubb %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] }
                        "psubb %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] }
                        "1: \n"
                        "por %%mm1, %%mm0 \n"
                        "movq (%2,%0,4), %%mm1 \n"
                        "movq 8(%2,%0,4), %%mm2 \n"
                        "movq %%mm1, %%mm3 \n"
                        "movq %%mm2, %%mm4 \n"
                        "psubw (%2), %%mm1 \n"
                        "psubw 8(%2), %%mm2 \n"
                        "psubw 160(%2), %%mm3 \n"
                        "psubw 168(%2), %%mm4 \n"
                        "packsswb %%mm2, %%mm1 \n"
                        "packsswb %%mm4, %%mm3 \n"
                        "paddb %%mm6, %%mm1 \n"
                        "paddb %%mm6, %%mm3 \n"
                        "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "psubusb %%mm5, %%mm3 \n"
                        "packsswb %%mm3, %%mm1 \n"
                        "add $40, %0 \n"
                        "cmp $40, %0 \n"
                        "jl 1b \n"
                        "sub $80, %0 \n"
                        "pshufw $0x4E, %%mm1, %%mm1 \n"
                        "por %%mm1, %%mm0 \n"
                        "pshufw $0x4E, %%mm0, %%mm1 \n"
                        "pminub %%mm1, %%mm0 \n"
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                } else {
                    __asm__ volatile(
                        "movd (%1), %%mm0 \n"
                        "psubb (%1,%0), %%mm0 \n" // ref[b] != ref[bn]
                        "movq (%2), %%mm1 \n"
                        "movq 8(%2), %%mm2 \n"
                        "psubw (%2,%0,4), %%mm1 \n"
                        "psubw 8(%2,%0,4), %%mm2 \n"
                        "packsswb %%mm2, %%mm1 \n"
                        "paddb %%mm6, %%mm1 \n"
                        "psubusb %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "packsswb %%mm1, %%mm1 \n"
                        "por %%mm1, %%mm0 \n"
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                }
            }
            __asm__ volatile(
                "movd %0, %%mm1 \n"
                "por %1, %%mm1 \n" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            __asm__ volatile(
                "pminub %%mm7, %%mm1 \n"
                "pminub %%mm7, %%mm0 \n"
                "psllw $1, %%mm1 \n"
                "pxor %%mm2, %%mm2 \n"
                "pmaxub %%mm0, %%mm1 \n"
                "punpcklbw %%mm2, %%mm1 \n"
                "movq %%mm1, %0 \n"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0, (%0) \n\t"
        "movq %%mm3, 8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

/***********************************/
/* motion compensation */
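/* All qpel lowpass kernels below implement the H.264 6-tap filter
 * (1,-5,20,20,-5,1) as 20*(c+d) - 5*(b+e) + (a+f) with rounding; single-pass
 * variants shift the sum right by 5, while the separable HV path keeps 16-bit
 * intermediates and splits the final normalization across several shifts (see
 * the hv2 kernels) to avoid overflow. */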
  847. #define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
  848. "mov"#q" "#C", "#T" \n\t"\
  849. "mov"#d" (%0), "#F" \n\t"\
  850. "paddw "#D", "#T" \n\t"\
  851. "psllw $2, "#T" \n\t"\
  852. "psubw "#B", "#T" \n\t"\
  853. "psubw "#E", "#T" \n\t"\
  854. "punpcklbw "#Z", "#F" \n\t"\
  855. "pmullw %4, "#T" \n\t"\
  856. "paddw %5, "#A" \n\t"\
  857. "add %2, %0 \n\t"\
  858. "paddw "#F", "#A" \n\t"\
  859. "paddw "#A", "#T" \n\t"\
  860. "psraw $5, "#T" \n\t"\
  861. "packuswb "#T", "#T" \n\t"\
  862. OP(T, (%1), A, d)\
  863. "add %3, %1 \n\t"
  864. #define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
  865. "mov"#q" "#C", "#T" \n\t"\
  866. "mov"#d" (%0), "#F" \n\t"\
  867. "paddw "#D", "#T" \n\t"\
  868. "psllw $2, "#T" \n\t"\
  869. "paddw %4, "#A" \n\t"\
  870. "psubw "#B", "#T" \n\t"\
  871. "psubw "#E", "#T" \n\t"\
  872. "punpcklbw "#Z", "#F" \n\t"\
  873. "pmullw %3, "#T" \n\t"\
  874. "paddw "#F", "#A" \n\t"\
  875. "add %2, %0 \n\t"\
  876. "paddw "#A", "#T" \n\t"\
  877. "mov"#q" "#T", "#OF"(%1) \n\t"
  878. #define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
  879. #define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
  880. #define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
  881. #define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)
  882. #define QPEL_H264(OPNAME, OP, MMX)\
  883. static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  884. int h=4;\
  885. \
  886. __asm__ volatile(\
  887. "pxor %%mm7, %%mm7 \n\t"\
  888. "movq "MANGLE(ff_pw_5) ", %%mm4\n\t"\
  889. "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
  890. "1: \n\t"\
  891. "movd -1(%0), %%mm1 \n\t"\
  892. "movd (%0), %%mm2 \n\t"\
  893. "movd 1(%0), %%mm3 \n\t"\
  894. "movd 2(%0), %%mm0 \n\t"\
  895. "punpcklbw %%mm7, %%mm1 \n\t"\
  896. "punpcklbw %%mm7, %%mm2 \n\t"\
  897. "punpcklbw %%mm7, %%mm3 \n\t"\
  898. "punpcklbw %%mm7, %%mm0 \n\t"\
  899. "paddw %%mm0, %%mm1 \n\t"\
  900. "paddw %%mm3, %%mm2 \n\t"\
  901. "movd -2(%0), %%mm0 \n\t"\
  902. "movd 3(%0), %%mm3 \n\t"\
  903. "punpcklbw %%mm7, %%mm0 \n\t"\
  904. "punpcklbw %%mm7, %%mm3 \n\t"\
  905. "paddw %%mm3, %%mm0 \n\t"\
  906. "psllw $2, %%mm2 \n\t"\
  907. "psubw %%mm1, %%mm2 \n\t"\
  908. "pmullw %%mm4, %%mm2 \n\t"\
  909. "paddw %%mm5, %%mm0 \n\t"\
  910. "paddw %%mm2, %%mm0 \n\t"\
  911. "psraw $5, %%mm0 \n\t"\
  912. "packuswb %%mm0, %%mm0 \n\t"\
  913. OP(%%mm0, (%1),%%mm6, d)\
  914. "add %3, %0 \n\t"\
  915. "add %4, %1 \n\t"\
  916. "decl %2 \n\t"\
  917. " jnz 1b \n\t"\
  918. : "+a"(src), "+c"(dst), "+g"(h)\
  919. : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
  920. : "memory"\
  921. );\
  922. }\
  923. static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
  924. int h=4;\
  925. __asm__ volatile(\
  926. "pxor %%mm7, %%mm7 \n\t"\
  927. "movq %0, %%mm4 \n\t"\
  928. "movq %1, %%mm5 \n\t"\
  929. :: "m"(ff_pw_5), "m"(ff_pw_16)\
  930. );\
  931. do{\
  932. __asm__ volatile(\
  933. "movd -1(%0), %%mm1 \n\t"\
  934. "movd (%0), %%mm2 \n\t"\
  935. "movd 1(%0), %%mm3 \n\t"\
  936. "movd 2(%0), %%mm0 \n\t"\
  937. "punpcklbw %%mm7, %%mm1 \n\t"\
  938. "punpcklbw %%mm7, %%mm2 \n\t"\
  939. "punpcklbw %%mm7, %%mm3 \n\t"\
  940. "punpcklbw %%mm7, %%mm0 \n\t"\
  941. "paddw %%mm0, %%mm1 \n\t"\
  942. "paddw %%mm3, %%mm2 \n\t"\
  943. "movd -2(%0), %%mm0 \n\t"\
  944. "movd 3(%0), %%mm3 \n\t"\
  945. "punpcklbw %%mm7, %%mm0 \n\t"\
  946. "punpcklbw %%mm7, %%mm3 \n\t"\
  947. "paddw %%mm3, %%mm0 \n\t"\
  948. "psllw $2, %%mm2 \n\t"\
  949. "psubw %%mm1, %%mm2 \n\t"\
  950. "pmullw %%mm4, %%mm2 \n\t"\
  951. "paddw %%mm5, %%mm0 \n\t"\
  952. "paddw %%mm2, %%mm0 \n\t"\
  953. "movd (%2), %%mm3 \n\t"\
  954. "psraw $5, %%mm0 \n\t"\
  955. "packuswb %%mm0, %%mm0 \n\t"\
  956. PAVGB" %%mm3, %%mm0 \n\t"\
  957. OP(%%mm0, (%1),%%mm6, d)\
  958. "add %4, %0 \n\t"\
  959. "add %4, %1 \n\t"\
  960. "add %3, %2 \n\t"\
  961. : "+a"(src), "+c"(dst), "+d"(src2)\
  962. : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
  963. : "memory"\
  964. );\
  965. }while(--h);\
  966. }\
  967. static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  968. src -= 2*srcStride;\
  969. __asm__ volatile(\
  970. "pxor %%mm7, %%mm7 \n\t"\
  971. "movd (%0), %%mm0 \n\t"\
  972. "add %2, %0 \n\t"\
  973. "movd (%0), %%mm1 \n\t"\
  974. "add %2, %0 \n\t"\
  975. "movd (%0), %%mm2 \n\t"\
  976. "add %2, %0 \n\t"\
  977. "movd (%0), %%mm3 \n\t"\
  978. "add %2, %0 \n\t"\
  979. "movd (%0), %%mm4 \n\t"\
  980. "add %2, %0 \n\t"\
  981. "punpcklbw %%mm7, %%mm0 \n\t"\
  982. "punpcklbw %%mm7, %%mm1 \n\t"\
  983. "punpcklbw %%mm7, %%mm2 \n\t"\
  984. "punpcklbw %%mm7, %%mm3 \n\t"\
  985. "punpcklbw %%mm7, %%mm4 \n\t"\
  986. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  987. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  988. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  989. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  990. \
  991. : "+a"(src), "+c"(dst)\
  992. : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  993. : "memory"\
  994. );\
  995. }\
  996. static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  997. int h=4;\
  998. int w=3;\
  999. src -= 2*srcStride+2;\
  1000. while(w--){\
  1001. __asm__ volatile(\
  1002. "pxor %%mm7, %%mm7 \n\t"\
  1003. "movd (%0), %%mm0 \n\t"\
  1004. "add %2, %0 \n\t"\
  1005. "movd (%0), %%mm1 \n\t"\
  1006. "add %2, %0 \n\t"\
  1007. "movd (%0), %%mm2 \n\t"\
  1008. "add %2, %0 \n\t"\
  1009. "movd (%0), %%mm3 \n\t"\
  1010. "add %2, %0 \n\t"\
  1011. "movd (%0), %%mm4 \n\t"\
  1012. "add %2, %0 \n\t"\
  1013. "punpcklbw %%mm7, %%mm0 \n\t"\
  1014. "punpcklbw %%mm7, %%mm1 \n\t"\
  1015. "punpcklbw %%mm7, %%mm2 \n\t"\
  1016. "punpcklbw %%mm7, %%mm3 \n\t"\
  1017. "punpcklbw %%mm7, %%mm4 \n\t"\
  1018. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
  1019. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
  1020. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
  1021. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
  1022. \
  1023. : "+a"(src)\
  1024. : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  1025. : "memory"\
  1026. );\
  1027. tmp += 4;\
  1028. src += 4 - 9*srcStride;\
  1029. }\
  1030. tmp -= 3*4;\
  1031. __asm__ volatile(\
  1032. "1: \n\t"\
  1033. "movq (%0), %%mm0 \n\t"\
  1034. "paddw 10(%0), %%mm0 \n\t"\
  1035. "movq 2(%0), %%mm1 \n\t"\
  1036. "paddw 8(%0), %%mm1 \n\t"\
  1037. "movq 4(%0), %%mm2 \n\t"\
  1038. "paddw 6(%0), %%mm2 \n\t"\
  1039. "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
  1040. "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
  1041. "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
  1042. "paddsw %%mm2, %%mm0 \n\t"\
  1043. "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
  1044. "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\
  1045. "psraw $6, %%mm0 \n\t"\
  1046. "packuswb %%mm0, %%mm0 \n\t"\
  1047. OP(%%mm0, (%1),%%mm7, d)\
  1048. "add $24, %0 \n\t"\
  1049. "add %3, %1 \n\t"\
  1050. "decl %2 \n\t"\
  1051. " jnz 1b \n\t"\
  1052. : "+a"(tmp), "+c"(dst), "+g"(h)\
  1053. : "S"((x86_reg)dstStride)\
  1054. : "memory"\
  1055. );\
  1056. }\
  1057. \
  1058. static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1059. int h=8;\
  1060. __asm__ volatile(\
  1061. "pxor %%mm7, %%mm7 \n\t"\
  1062. "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
  1063. "1: \n\t"\
  1064. "movq (%0), %%mm0 \n\t"\
  1065. "movq 1(%0), %%mm2 \n\t"\
  1066. "movq %%mm0, %%mm1 \n\t"\
  1067. "movq %%mm2, %%mm3 \n\t"\
  1068. "punpcklbw %%mm7, %%mm0 \n\t"\
  1069. "punpckhbw %%mm7, %%mm1 \n\t"\
  1070. "punpcklbw %%mm7, %%mm2 \n\t"\
  1071. "punpckhbw %%mm7, %%mm3 \n\t"\
  1072. "paddw %%mm2, %%mm0 \n\t"\
  1073. "paddw %%mm3, %%mm1 \n\t"\
  1074. "psllw $2, %%mm0 \n\t"\
  1075. "psllw $2, %%mm1 \n\t"\
  1076. "movq -1(%0), %%mm2 \n\t"\
  1077. "movq 2(%0), %%mm4 \n\t"\
  1078. "movq %%mm2, %%mm3 \n\t"\
  1079. "movq %%mm4, %%mm5 \n\t"\
  1080. "punpcklbw %%mm7, %%mm2 \n\t"\
  1081. "punpckhbw %%mm7, %%mm3 \n\t"\
  1082. "punpcklbw %%mm7, %%mm4 \n\t"\
  1083. "punpckhbw %%mm7, %%mm5 \n\t"\
  1084. "paddw %%mm4, %%mm2 \n\t"\
  1085. "paddw %%mm3, %%mm5 \n\t"\
  1086. "psubw %%mm2, %%mm0 \n\t"\
  1087. "psubw %%mm5, %%mm1 \n\t"\
  1088. "pmullw %%mm6, %%mm0 \n\t"\
  1089. "pmullw %%mm6, %%mm1 \n\t"\
  1090. "movd -2(%0), %%mm2 \n\t"\
  1091. "movd 7(%0), %%mm5 \n\t"\
  1092. "punpcklbw %%mm7, %%mm2 \n\t"\
  1093. "punpcklbw %%mm7, %%mm5 \n\t"\
  1094. "paddw %%mm3, %%mm2 \n\t"\
  1095. "paddw %%mm5, %%mm4 \n\t"\
  1096. "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
  1097. "paddw %%mm5, %%mm2 \n\t"\
  1098. "paddw %%mm5, %%mm4 \n\t"\
  1099. "paddw %%mm2, %%mm0 \n\t"\
  1100. "paddw %%mm4, %%mm1 \n\t"\
  1101. "psraw $5, %%mm0 \n\t"\
  1102. "psraw $5, %%mm1 \n\t"\
  1103. "packuswb %%mm1, %%mm0 \n\t"\
  1104. OP(%%mm0, (%1),%%mm5, q)\
  1105. "add %3, %0 \n\t"\
  1106. "add %4, %1 \n\t"\
  1107. "decl %2 \n\t"\
  1108. " jnz 1b \n\t"\
  1109. : "+a"(src), "+c"(dst), "+g"(h)\
  1110. : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
  1111. : "memory"\
  1112. );\
  1113. }\
  1114. \
  1115. static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
  1116. int h=8;\
  1117. __asm__ volatile(\
  1118. "pxor %%mm7, %%mm7 \n\t"\
  1119. "movq %0, %%mm6 \n\t"\
  1120. :: "m"(ff_pw_5)\
  1121. );\
  1122. do{\
  1123. __asm__ volatile(\
  1124. "movq (%0), %%mm0 \n\t"\
  1125. "movq 1(%0), %%mm2 \n\t"\
  1126. "movq %%mm0, %%mm1 \n\t"\
  1127. "movq %%mm2, %%mm3 \n\t"\
  1128. "punpcklbw %%mm7, %%mm0 \n\t"\
  1129. "punpckhbw %%mm7, %%mm1 \n\t"\
  1130. "punpcklbw %%mm7, %%mm2 \n\t"\
  1131. "punpckhbw %%mm7, %%mm3 \n\t"\
  1132. "paddw %%mm2, %%mm0 \n\t"\
  1133. "paddw %%mm3, %%mm1 \n\t"\
  1134. "psllw $2, %%mm0 \n\t"\
  1135. "psllw $2, %%mm1 \n\t"\
  1136. "movq -1(%0), %%mm2 \n\t"\
  1137. "movq 2(%0), %%mm4 \n\t"\
  1138. "movq %%mm2, %%mm3 \n\t"\
  1139. "movq %%mm4, %%mm5 \n\t"\
  1140. "punpcklbw %%mm7, %%mm2 \n\t"\
  1141. "punpckhbw %%mm7, %%mm3 \n\t"\
  1142. "punpcklbw %%mm7, %%mm4 \n\t"\
  1143. "punpckhbw %%mm7, %%mm5 \n\t"\
  1144. "paddw %%mm4, %%mm2 \n\t"\
  1145. "paddw %%mm3, %%mm5 \n\t"\
  1146. "psubw %%mm2, %%mm0 \n\t"\
  1147. "psubw %%mm5, %%mm1 \n\t"\
  1148. "pmullw %%mm6, %%mm0 \n\t"\
  1149. "pmullw %%mm6, %%mm1 \n\t"\
  1150. "movd -2(%0), %%mm2 \n\t"\
  1151. "movd 7(%0), %%mm5 \n\t"\
  1152. "punpcklbw %%mm7, %%mm2 \n\t"\
  1153. "punpcklbw %%mm7, %%mm5 \n\t"\
  1154. "paddw %%mm3, %%mm2 \n\t"\
  1155. "paddw %%mm5, %%mm4 \n\t"\
  1156. "movq %5, %%mm5 \n\t"\
  1157. "paddw %%mm5, %%mm2 \n\t"\
  1158. "paddw %%mm5, %%mm4 \n\t"\
  1159. "paddw %%mm2, %%mm0 \n\t"\
  1160. "paddw %%mm4, %%mm1 \n\t"\
  1161. "psraw $5, %%mm0 \n\t"\
  1162. "psraw $5, %%mm1 \n\t"\
  1163. "movq (%2), %%mm4 \n\t"\
  1164. "packuswb %%mm1, %%mm0 \n\t"\
  1165. PAVGB" %%mm4, %%mm0 \n\t"\
  1166. OP(%%mm0, (%1),%%mm5, q)\
  1167. "add %4, %0 \n\t"\
  1168. "add %4, %1 \n\t"\
  1169. "add %3, %2 \n\t"\
  1170. : "+a"(src), "+c"(dst), "+d"(src2)\
  1171. : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
  1172. "m"(ff_pw_16)\
  1173. : "memory"\
  1174. );\
  1175. }while(--h);\
  1176. }\
  1177. \
  1178. static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  1179. int w= 2;\
  1180. src -= 2*srcStride;\
  1181. \
  1182. while(w--){\
  1183. __asm__ volatile(\
  1184. "pxor %%mm7, %%mm7 \n\t"\
  1185. "movd (%0), %%mm0 \n\t"\
  1186. "add %2, %0 \n\t"\
  1187. "movd (%0), %%mm1 \n\t"\
  1188. "add %2, %0 \n\t"\
  1189. "movd (%0), %%mm2 \n\t"\
  1190. "add %2, %0 \n\t"\
  1191. "movd (%0), %%mm3 \n\t"\
  1192. "add %2, %0 \n\t"\
  1193. "movd (%0), %%mm4 \n\t"\
  1194. "add %2, %0 \n\t"\
  1195. "punpcklbw %%mm7, %%mm0 \n\t"\
  1196. "punpcklbw %%mm7, %%mm1 \n\t"\
  1197. "punpcklbw %%mm7, %%mm2 \n\t"\
  1198. "punpcklbw %%mm7, %%mm3 \n\t"\
  1199. "punpcklbw %%mm7, %%mm4 \n\t"\
  1200. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  1201. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  1202. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  1203. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  1204. QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
  1205. QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
  1206. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  1207. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  1208. \
  1209. : "+a"(src), "+c"(dst)\
  1210. : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  1211. : "memory"\
  1212. );\
  1213. if(h==16){\
  1214. __asm__ volatile(\
  1215. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  1216. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  1217. QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
  1218. QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
  1219. QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
  1220. QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
  1221. QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
  1222. QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
  1223. \
  1224. : "+a"(src), "+c"(dst)\
  1225. : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  1226. : "memory"\
  1227. );\
  1228. }\
  1229. src += 4-(h+5)*srcStride;\
  1230. dst += 4-h*dstStride;\
  1231. }\
  1232. }\
  1233. static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
  1234. int w = (size+8)>>2;\
  1235. src -= 2*srcStride+2;\
  1236. while(w--){\
  1237. __asm__ volatile(\
  1238. "pxor %%mm7, %%mm7 \n\t"\
  1239. "movd (%0), %%mm0 \n\t"\
  1240. "add %2, %0 \n\t"\
  1241. "movd (%0), %%mm1 \n\t"\
  1242. "add %2, %0 \n\t"\
  1243. "movd (%0), %%mm2 \n\t"\
  1244. "add %2, %0 \n\t"\
  1245. "movd (%0), %%mm3 \n\t"\
  1246. "add %2, %0 \n\t"\
  1247. "movd (%0), %%mm4 \n\t"\
  1248. "add %2, %0 \n\t"\
  1249. "punpcklbw %%mm7, %%mm0 \n\t"\
  1250. "punpcklbw %%mm7, %%mm1 \n\t"\
  1251. "punpcklbw %%mm7, %%mm2 \n\t"\
  1252. "punpcklbw %%mm7, %%mm3 \n\t"\
  1253. "punpcklbw %%mm7, %%mm4 \n\t"\
  1254. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
  1255. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
  1256. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
  1257. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
  1258. QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
  1259. QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
  1260. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
  1261. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
  1262. : "+a"(src)\
  1263. : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  1264. : "memory"\
  1265. );\
  1266. if(size==16){\
  1267. __asm__ volatile(\
  1268. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\
  1269. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\
  1270. QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
  1271. QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
  1272. QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
  1273. QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
  1274. QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
  1275. QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
  1276. : "+a"(src)\
  1277. : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
  1278. : "memory"\
  1279. );\
  1280. }\
  1281. tmp += 4;\
  1282. src += 4 - (size+5)*srcStride;\
  1283. }\
  1284. }\
  1285. static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
  1286. int w = size>>4;\
  1287. do{\
  1288. int h = size;\
  1289. __asm__ volatile(\
  1290. "1: \n\t"\
  1291. "movq (%0), %%mm0 \n\t"\
  1292. "movq 8(%0), %%mm3 \n\t"\
  1293. "movq 2(%0), %%mm1 \n\t"\
  1294. "movq 10(%0), %%mm4 \n\t"\
  1295. "paddw %%mm4, %%mm0 \n\t"\
  1296. "paddw %%mm3, %%mm1 \n\t"\
  1297. "paddw 18(%0), %%mm3 \n\t"\
  1298. "paddw 16(%0), %%mm4 \n\t"\
  1299. "movq 4(%0), %%mm2 \n\t"\
  1300. "movq 12(%0), %%mm5 \n\t"\
  1301. "paddw 6(%0), %%mm2 \n\t"\
  1302. "paddw 14(%0), %%mm5 \n\t"\
  1303. "psubw %%mm1, %%mm0 \n\t"\
  1304. "psubw %%mm4, %%mm3 \n\t"\
  1305. "psraw $2, %%mm0 \n\t"\
  1306. "psraw $2, %%mm3 \n\t"\
  1307. "psubw %%mm1, %%mm0 \n\t"\
  1308. "psubw %%mm4, %%mm3 \n\t"\
  1309. "paddsw %%mm2, %%mm0 \n\t"\
  1310. "paddsw %%mm5, %%mm3 \n\t"\
  1311. "psraw $2, %%mm0 \n\t"\
  1312. "psraw $2, %%mm3 \n\t"\
  1313. "paddw %%mm2, %%mm0 \n\t"\
  1314. "paddw %%mm5, %%mm3 \n\t"\
  1315. "psraw $6, %%mm0 \n\t"\
  1316. "psraw $6, %%mm3 \n\t"\
  1317. "packuswb %%mm3, %%mm0 \n\t"\
  1318. OP(%%mm0, (%1),%%mm7, q)\
  1319. "add $48, %0 \n\t"\
  1320. "add %3, %1 \n\t"\
  1321. "decl %2 \n\t"\
  1322. " jnz 1b \n\t"\
  1323. : "+a"(tmp), "+c"(dst), "+g"(h)\
  1324. : "S"((x86_reg)dstStride)\
  1325. : "memory"\
  1326. );\
  1327. tmp += 8 - size*24;\
  1328. dst += 8 - size*dstStride;\
  1329. }while(w--);\
  1330. }\
  1331. \
  1332. static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1333. OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
  1334. }\
  1335. static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1336. OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
  1337. OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
  1338. }\
  1339. \
  1340. static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
  1341. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  1342. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  1343. src += 8*srcStride;\
  1344. dst += 8*dstStride;\
  1345. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
  1346. OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
  1347. }\
  1348. \
  1349. static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
  1350. OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
  1351. OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
  1352. src += 8*dstStride;\
  1353. dst += 8*dstStride;\
  1354. src2 += 8*src2Stride;\
  1355. OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
  1356. OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
  1357. }\
  1358. \
  1359. static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
  1360. put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
  1361. OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
  1362. }\
  1363. static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1364. OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
  1365. }\
  1366. \
  1367. static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
  1368. OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
  1369. }\
  1370. \
  1371. static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
  1372. {\
  1373. __asm__ volatile(\
  1374. "movq (%1), %%mm0 \n\t"\
  1375. "movq 24(%1), %%mm1 \n\t"\
  1376. "psraw $5, %%mm0 \n\t"\
  1377. "psraw $5, %%mm1 \n\t"\
  1378. "packuswb %%mm0, %%mm0 \n\t"\
  1379. "packuswb %%mm1, %%mm1 \n\t"\
  1380. PAVGB" (%0), %%mm0 \n\t"\
  1381. PAVGB" (%0,%3), %%mm1 \n\t"\
  1382. OP(%%mm0, (%2), %%mm4, d)\
  1383. OP(%%mm1, (%2,%4), %%mm5, d)\
  1384. "lea (%0,%3,2), %0 \n\t"\
  1385. "lea (%2,%4,2), %2 \n\t"\
  1386. "movq 48(%1), %%mm0 \n\t"\
  1387. "movq 72(%1), %%mm1 \n\t"\
  1388. "psraw $5, %%mm0 \n\t"\
  1389. "psraw $5, %%mm1 \n\t"\
  1390. "packuswb %%mm0, %%mm0 \n\t"\
  1391. "packuswb %%mm1, %%mm1 \n\t"\
  1392. PAVGB" (%0), %%mm0 \n\t"\
  1393. PAVGB" (%0,%3), %%mm1 \n\t"\
  1394. OP(%%mm0, (%2), %%mm4, d)\
  1395. OP(%%mm1, (%2,%4), %%mm5, d)\
  1396. :"+a"(src8), "+c"(src16), "+d"(dst)\
  1397. :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
  1398. :"memory");\
  1399. }\
  1400. static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
  1401. {\
  1402. do{\
  1403. __asm__ volatile(\
  1404. "movq (%1), %%mm0 \n\t"\
  1405. "movq 8(%1), %%mm1 \n\t"\
  1406. "movq 48(%1), %%mm2 \n\t"\
  1407. "movq 8+48(%1), %%mm3 \n\t"\
  1408. "psraw $5, %%mm0 \n\t"\
  1409. "psraw $5, %%mm1 \n\t"\
  1410. "psraw $5, %%mm2 \n\t"\
  1411. "psraw $5, %%mm3 \n\t"\
  1412. "packuswb %%mm1, %%mm0 \n\t"\
  1413. "packuswb %%mm3, %%mm2 \n\t"\
  1414. PAVGB" (%0), %%mm0 \n\t"\
  1415. PAVGB" (%0,%3), %%mm2 \n\t"\
  1416. OP(%%mm0, (%2), %%mm5, q)\
  1417. OP(%%mm2, (%2,%4), %%mm5, q)\
  1418. ::"a"(src8), "c"(src16), "d"(dst),\
  1419. "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
  1420. :"memory");\
  1421. src8 += 2L*src8Stride;\
  1422. src16 += 48;\
  1423. dst += 2L*dstStride;\
  1424. }while(h-=2);\
  1425. }\
  1426. static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
  1427. {\
  1428. OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
  1429. OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
  1430. }\
#if ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=16;\
    __asm__ volatile(\
        "pxor %%xmm15, %%xmm15 \n\t"\
        "movdqa %6, %%xmm14 \n\t"\
        "movdqa %7, %%xmm13 \n\t"\
        "1: \n\t"\
        "lddqu 6(%0), %%xmm1 \n\t"\
        "lddqu -2(%0), %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm15, %%xmm1 \n\t"\
        "punpcklbw %%xmm15, %%xmm0 \n\t"\
        "punpcklbw %%xmm15, %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm0, %%xmm6 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm0, %%xmm8 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm0, %%xmm9 \n\t"\
        "movdqa %%xmm0, %%xmm12 \n\t"\
        "movdqa %%xmm1, %%xmm11 \n\t"\
        "palignr $10,%%xmm0, %%xmm11\n\t"\
        "palignr $10,%%xmm7, %%xmm12\n\t"\
        "palignr $2, %%xmm0, %%xmm4 \n\t"\
        "palignr $2, %%xmm7, %%xmm9 \n\t"\
        "palignr $4, %%xmm0, %%xmm3 \n\t"\
        "palignr $4, %%xmm7, %%xmm8 \n\t"\
        "palignr $6, %%xmm0, %%xmm2 \n\t"\
        "palignr $6, %%xmm7, %%xmm6 \n\t"\
        "paddw %%xmm0 ,%%xmm11 \n\t"\
        "palignr $8, %%xmm0, %%xmm1 \n\t"\
        "palignr $8, %%xmm7, %%xmm0 \n\t"\
        "paddw %%xmm12,%%xmm7 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm8, %%xmm6 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "paddw %%xmm9, %%xmm0 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psllw $2, %%xmm6 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "psubw %%xmm0, %%xmm6 \n\t"\
        "paddw %%xmm13,%%xmm11 \n\t"\
        "paddw %%xmm13,%%xmm7 \n\t"\
        "pmullw %%xmm14,%%xmm2 \n\t"\
        "pmullw %%xmm14,%%xmm6 \n\t"\
        "lddqu (%2), %%xmm3 \n\t"\
        "paddw %%xmm11,%%xmm2 \n\t"\
        "paddw %%xmm7, %%xmm6 \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "psraw $5, %%xmm6 \n\t"\
        "packuswb %%xmm2,%%xmm6 \n\t"\
        "pavgb %%xmm3, %%xmm6 \n\t"\
        OP(%%xmm6, (%1), %%xmm4, dqa)\
        "add %5, %0 \n\t"\
        "add %5, %1 \n\t"\
        "add %4, %2 \n\t"\
        "decl %3 \n\t"\
        "jg 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
        : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64
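/*
 * The h_lowpass kernels below implement the horizontal H.264 6-tap
 * half-pel filter (1,-5,20,20,-5,1) with rounding, i.e. per output pixel
 * (scalar sketch, illustrative names only):
 *
 *   dst[i] = clip_uint8((src[i-2] + src[i+3]
 *                        - 5*(src[i-1] + src[i+2])
 *                        + 20*(src[i]  + src[i+1]) + 16) >> 5);
 *
 * The asm evaluates this as (((c+d)<<2 - (b+e))*5 + (a+f) + 16) >> 5,
 * which yields the same weights with a single pmullw per batch of pixels.
 * The _l2_ variants additionally average the filter output with a second
 * 8-bit prediction (pavgb), producing the quarter-pel positions.
 */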
#define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movdqa %0, %%xmm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        __asm__ volatile(\
            "lddqu -2(%0), %%xmm1 \n\t"\
            "movdqa %%xmm1, %%xmm0 \n\t"\
            "punpckhbw %%xmm7, %%xmm1 \n\t"\
            "punpcklbw %%xmm7, %%xmm0 \n\t"\
            "movdqa %%xmm1, %%xmm2 \n\t"\
            "movdqa %%xmm1, %%xmm3 \n\t"\
            "movdqa %%xmm1, %%xmm4 \n\t"\
            "movdqa %%xmm1, %%xmm5 \n\t"\
            "palignr $2, %%xmm0, %%xmm4 \n\t"\
            "palignr $4, %%xmm0, %%xmm3 \n\t"\
            "palignr $6, %%xmm0, %%xmm2 \n\t"\
            "palignr $8, %%xmm0, %%xmm1 \n\t"\
            "palignr $10,%%xmm0, %%xmm5 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "psllw $2, %%xmm2 \n\t"\
            "movq (%2), %%xmm3 \n\t"\
            "psubw %%xmm1, %%xmm2 \n\t"\
            "paddw %5, %%xmm0 \n\t"\
            "pmullw %%xmm6, %%xmm2 \n\t"\
            "paddw %%xmm0, %%xmm2 \n\t"\
            "psraw $5, %%xmm2 \n\t"\
            "packuswb %%xmm2, %%xmm2 \n\t"\
            "pavgb %%xmm3, %%xmm2 \n\t"\
            OP(%%xmm2, (%1), %%xmm4, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movdqa "MANGLE(ff_pw_5)", %%xmm6\n\t"\
        "1: \n\t"\
        "lddqu -2(%0), %%xmm1 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm7, %%xmm1 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm1, %%xmm5 \n\t"\
        "palignr $2, %%xmm0, %%xmm4 \n\t"\
        "palignr $4, %%xmm0, %%xmm3 \n\t"\
        "palignr $6, %%xmm0, %%xmm2 \n\t"\
        "palignr $8, %%xmm0, %%xmm1 \n\t"\
        "palignr $10,%%xmm0, %%xmm5 \n\t"\
        "paddw %%xmm5, %%xmm0 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "paddw "MANGLE(ff_pw_16)", %%xmm0\n\t"\
        "pmullw %%xmm6, %%xmm2 \n\t"\
        "paddw %%xmm0, %%xmm2 \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "packuswb %%xmm2, %%xmm2 \n\t"\
        OP(%%xmm2, (%1), %%xmm4, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

#define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    src -= 2*srcStride;\
    \
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movq (%0), %%xmm0 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm1 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm2 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm3 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "punpcklbw %%xmm7, %%xmm1 \n\t"\
        "punpcklbw %%xmm7, %%xmm2 \n\t"\
        "punpcklbw %%xmm7, %%xmm3 \n\t"\
        "punpcklbw %%xmm7, %%xmm4 \n\t"\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
        QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
        QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
        QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
    if(h==16){\
        __asm__ volatile(\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
            QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
            QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
            QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
    }\
}\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst, src, dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}
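/*
 * The vertical pass keeps a sliding six-row window of unpacked pixels in
 * xmm registers: each QPEL_H264V_XMM invocation emits one output row and
 * rotates the register assignment so that only one new row needs to be
 * loaded per line.  One asm block covers 8 rows; for h==16 a second block
 * continues from where the register rotation left off.
 */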
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
    int w = (size+8)>>3;
    src -= 2*srcStride+2;
    while(w--){
        __asm__ volatile(
            "pxor %%xmm7, %%xmm7 \n\t"
            "movq (%0), %%xmm0 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm1 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm2 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm3 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm4 \n\t"
            "add %2, %0 \n\t"
            "punpcklbw %%xmm7, %%xmm0 \n\t"
            "punpcklbw %%xmm7, %%xmm1 \n\t"
            "punpcklbw %%xmm7, %%xmm2 \n\t"
            "punpcklbw %%xmm7, %%xmm3 \n\t"
            "punpcklbw %%xmm7, %%xmm4 \n\t"
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48)
            QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48)
            QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48)
            QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48)
            QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48)
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48)
            : "+a"(src)
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
            : "memory"
        );
        if(size==16){
            __asm__ volatile(
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1,  8*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2,  9*48)
                QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
                QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
                QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
                QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
                : "+a"(src)
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
                : "memory"
            );
        }
        tmp += 8;
        src += 8 - (size+5)*srcStride;
    }
}
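/*
 * hv1 above is the first half of the 2D (hv) filter: it runs the vertical
 * 6-tap over 8-pixel-wide column strips and stores the unnormalised 16-bit
 * sums in tmp.  tmp rows are 48 bytes (24 int16) apart, which is enough for
 * the size+5 columns the following horizontal pass needs and presumably
 * keeps each row within a small number of cache lines.  The hv2 kernels
 * below then filter tmp horizontally and normalise the result.
 */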
#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int h = size;\
    if(size == 16){\
        __asm__ volatile(\
            "1: \n\t"\
            "movdqa 32(%0), %%xmm4 \n\t"\
            "movdqa 16(%0), %%xmm5 \n\t"\
            "movdqa (%0), %%xmm7 \n\t"\
            "movdqa %%xmm4, %%xmm3 \n\t"\
            "movdqa %%xmm4, %%xmm2 \n\t"\
            "movdqa %%xmm4, %%xmm1 \n\t"\
            "movdqa %%xmm4, %%xmm0 \n\t"\
            "palignr $10, %%xmm5, %%xmm0 \n\t"\
            "palignr $8, %%xmm5, %%xmm1 \n\t"\
            "palignr $6, %%xmm5, %%xmm2 \n\t"\
            "palignr $4, %%xmm5, %%xmm3 \n\t"\
            "palignr $2, %%xmm5, %%xmm4 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "movdqa %%xmm5, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm3 \n\t"\
            "palignr $8, %%xmm7, %%xmm4 \n\t"\
            "palignr $2, %%xmm7, %%xmm6 \n\t"\
            "palignr $10, %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "palignr $6, %%xmm7, %%xmm5 \n\t"\
            "palignr $4, %%xmm7, %%xmm6 \n\t"\
            "paddw %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm5 \n\t"\
            \
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "psraw $6, %%xmm3 \n\t"\
            "packuswb %%xmm0, %%xmm3 \n\t"\
            OP(%%xmm3, (%1), %%xmm7, dqa)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }else{\
        __asm__ volatile(\
            "1: \n\t"\
            "movdqa 16(%0), %%xmm1 \n\t"\
            "movdqa (%0), %%xmm0 \n\t"\
            "movdqa %%xmm1, %%xmm2 \n\t"\
            "movdqa %%xmm1, %%xmm3 \n\t"\
            "movdqa %%xmm1, %%xmm4 \n\t"\
            "movdqa %%xmm1, %%xmm5 \n\t"\
            "palignr $10, %%xmm0, %%xmm5 \n\t"\
            "palignr $8, %%xmm0, %%xmm4 \n\t"\
            "palignr $6, %%xmm0, %%xmm3 \n\t"\
            "palignr $4, %%xmm0, %%xmm2 \n\t"\
            "palignr $2, %%xmm0, %%xmm1 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "packuswb %%xmm0, %%xmm0 \n\t"\
            OP(%%xmm0, (%1), %%xmm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }\
}
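/*
 * hv2 evaluates the horizontal 6-tap on the 16-bit intermediates and folds
 * the combined normalisation of both passes (>>10) into staged shifts so
 * that everything stays within 16-bit words.  With A=a+f, B=b+e, C=c+d the
 * shift/sub/add sequence computes, up to the truncation introduced by the
 * intermediate arithmetic shifts,
 *
 *   (((((A-B)>>2) - B + C) >> 2) + C) >> 6  ~=  (A - 5*B + 20*C + 512) >> 10
 */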
#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\

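/*
 * The aliases below reuse existing kernels where a wider implementation
 * would gain nothing: the l2 averaging helpers operate on at most 8 pixels
 * at a time anyway, so the SSE2/SSSE3 function tables simply point at the
 * MMX2 versions, and the SSSE3 vertical filter is the SSE2 one.
 */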
#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2

#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2

#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2

#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2

#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2
#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\

static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2

#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\

#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\

#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\

#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

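/*
 * mcXY naming: X and Y are the quarter-pel offsets (0..3) in x and y.
 * Full- and half-pel positions map directly onto one kernel (mc00 copy,
 * mc20 horizontal, mc02 vertical, mc22 2D); the remaining quarter-pel
 * positions are built by averaging two of those predictions, either via
 * the _l2_ kernels or via pixels*_l2(_shift5).
 */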
#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\

#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\

#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
#if HAVE_SSSE3
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#if HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif
/* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */
DECLARE_ALIGNED(8, static const uint64_t, h264_rnd_reg)[4] = {
    0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
};
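/*
 * Index 0/1 (32 and 4) hold the H.264 chroma rounding constants for the
 * bilinear (>>6) and one-dimensional (>>3) cases; index 2/3 (28 and 3)
 * give the VC-1 "no rounding" behaviour, which is why the VC-1 wrappers
 * below pass h264_rnd_reg+2.
 */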
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_generic_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_generic_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"

static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void put_vc1_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg+2);
}
static void put_h264_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"

static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2);
}
static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"

static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
}
static void avg_h264_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#if HAVE_SSSE3
#define AVG_OP(X)
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_ssse3.c"

static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}

#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#define AVG_OP(X) X
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_ssse3.c"

static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}

#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#endif
/***********************************/
/* weighted prediction */

static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    __asm__ volatile(
        "movd %0, %%mm4 \n\t"
        "movd %1, %%mm5 \n\t"
        "movd %2, %%mm6 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm4, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "paddsw %%mm5, %%mm1 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "psraw %%mm6, %%mm1 \n\t"
                "packuswb %%mm7, %%mm0 \n\t"
                "packuswb %%mm7, %%mm1 \n\t"
                "movd %%mm0, %0 \n\t"
                "movd %%mm1, %1 \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}
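/*
 * Scalar equivalent of the weighted-prediction loop above (illustrative):
 * with off = (offset << log2_denom) + ((1 << log2_denom) >> 1), each pixel
 * becomes
 *
 *   dst[i] = av_clip_uint8((dst[i]*weight + off) >> log2_denom);
 *
 * which matches the spec formula ((p*w + 2^(d-1)) >> d) + offset, since the
 * folded offset term is shifted straight back out.
 */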
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    __asm__ volatile(
        "movd %0, %%mm3 \n\t"
        "movd %1, %%mm4 \n\t"
        "movd %2, %%mm5 \n\t"
        "movd %3, %%mm6 \n\t"
        "pshufw $0, %%mm3, %%mm3 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm3, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm1, %%mm0 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}
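/*
 * Scalar sketch of the bidirectional case (illustrative): with
 * off = ((offset + 1) | 1) << log2_denom, each pixel becomes
 *
 *   dst[i] = av_clip_uint8((dst[i]*weightd + src[i]*weights + off)
 *                          >> (log2_denom + 1));
 *
 * The |1 forces the folded rounding/offset constant odd, as the H.264
 * biweight formula requires.
 */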
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)

void ff_h264_biweight_8x8_sse2(uint8_t *dst, uint8_t *src, int stride,
                               int log2_denom, int weightd, int weights,
                               int offset);

void ff_h264_biweight_16x16_sse2(uint8_t *dst, uint8_t *src, int stride,
                                 int log2_denom, int weightd, int weights,
                                 int offset);

void ff_h264_biweight_8x8_ssse3(uint8_t *dst, uint8_t *src, int stride,
                                int log2_denom, int weightd, int weights,
                                int offset);

void ff_h264_biweight_16x16_ssse3(uint8_t *dst, uint8_t *src, int stride,
                                  int log2_denom, int weightd, int weights,
                                  int offset);

void ff_pred16x16_vertical_mmx     (uint8_t *src, int stride);
void ff_pred16x16_vertical_sse     (uint8_t *src, int stride);
void ff_pred16x16_horizontal_mmx   (uint8_t *src, int stride);
void ff_pred16x16_horizontal_mmxext(uint8_t *src, int stride);
void ff_pred16x16_horizontal_ssse3 (uint8_t *src, int stride);
void ff_pred16x16_dc_mmxext        (uint8_t *src, int stride);
void ff_pred16x16_dc_sse2          (uint8_t *src, int stride);
void ff_pred16x16_dc_ssse3         (uint8_t *src, int stride);
void ff_pred16x16_tm_vp8_mmx       (uint8_t *src, int stride);
void ff_pred16x16_tm_vp8_mmxext    (uint8_t *src, int stride);
void ff_pred16x16_tm_vp8_sse2      (uint8_t *src, int stride);
void ff_pred8x8_dc_rv40_mmxext     (uint8_t *src, int stride);
void ff_pred8x8_vertical_mmx       (uint8_t *src, int stride);
void ff_pred8x8_horizontal_mmx     (uint8_t *src, int stride);
void ff_pred8x8_horizontal_mmxext  (uint8_t *src, int stride);
void ff_pred8x8_horizontal_ssse3   (uint8_t *src, int stride);
void ff_pred8x8_tm_vp8_mmx         (uint8_t *src, int stride);
void ff_pred8x8_tm_vp8_mmxext      (uint8_t *src, int stride);
void ff_pred8x8_tm_vp8_sse2        (uint8_t *src, int stride);
void ff_pred8x8_tm_vp8_ssse3       (uint8_t *src, int stride);
void ff_pred4x4_dc_mmxext          (uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_tm_vp8_mmx         (uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_tm_vp8_mmxext      (uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_tm_vp8_ssse3       (uint8_t *src, const uint8_t *topright, int stride);
void ff_pred4x4_vertical_vp8_mmxext(uint8_t *src, const uint8_t *topright, int stride);
#if CONFIG_H264PRED
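/*
 * Runtime dispatch: each block only overwrites the table entries its CPU
 * flag improves on, and the checks run from oldest to newest extension,
 * so the fastest available implementation ends up in H264PredContext.
 */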
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
{
    int mm_flags = mm_support();

#if HAVE_YASM
    if (mm_flags & FF_MM_MMX) {
        h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_mmx;
        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx;
        h->pred8x8  [VERT_PRED8x8] = ff_pred8x8_vertical_mmx;
        h->pred8x8  [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmx;
        if (codec_id == CODEC_ID_VP8) {
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmx;
            h->pred8x8  [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmx;
            h->pred4x4  [TM_VP8_PRED  ] = ff_pred4x4_tm_vp8_mmx;
        }
    }

    if (mm_flags & FF_MM_MMX2) {
        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext;
        h->pred16x16[DC_PRED8x8  ] = ff_pred16x16_dc_mmxext;
        h->pred8x8  [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext;
        h->pred4x4  [DC_PRED     ] = ff_pred4x4_dc_mmxext;
        if (codec_id == CODEC_ID_VP8) {
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmxext;
            h->pred8x8  [DC_PRED8x8   ] = ff_pred8x8_dc_rv40_mmxext;
            h->pred8x8  [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmxext;
            h->pred4x4  [TM_VP8_PRED  ] = ff_pred4x4_tm_vp8_mmxext;
            h->pred4x4  [VERT_PRED    ] = ff_pred4x4_vertical_vp8_mmxext;
        }
    }

    if (mm_flags & FF_MM_SSE) {
        h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_sse;
    }

    if (mm_flags & FF_MM_SSE2) {
        h->pred16x16[DC_PRED8x8] = ff_pred16x16_dc_sse2;
        if (codec_id == CODEC_ID_VP8) {
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2;
            h->pred8x8  [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_sse2;
        }
    }

    if (mm_flags & FF_MM_SSSE3) {
        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3;
        h->pred16x16[DC_PRED8x8  ] = ff_pred16x16_dc_ssse3;
        h->pred8x8  [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3;
        if (codec_id == CODEC_ID_VP8) {
            h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_tm_vp8_ssse3;
            h->pred4x4[TM_VP8_PRED  ] = ff_pred4x4_tm_vp8_ssse3;
        }
    }
#endif
}
#endif