/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"

DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1) = 0x0103010301030103ULL;
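/* ff_pb_3_1 above packs alternating {3, 1} bias bytes.  It is substituted
 * for ff_pb_3 in the loop-filter strength code for field macroblocks,
 * where the vertical mv-delta threshold of the bias/saturate trick is
 * halved relative to the horizontal one (see the iteration macro below). */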
/***********************************/
/* IDCT */

#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT (uint8_t *dst, int16_t *block, int stride);

IDCT_ADD_FUNC(, 8, mmx)
IDCT_ADD_FUNC(, 10, sse2)
IDCT_ADD_FUNC(_dc, 8, mmx2)
IDCT_ADD_FUNC(_dc, 10, mmx2)
IDCT_ADD_FUNC(8_dc, 8, mmx2)
IDCT_ADD_FUNC(8_dc, 10, sse2)
IDCT_ADD_FUNC(8, 8, mmx)
IDCT_ADD_FUNC(8, 8, sse2)
IDCT_ADD_FUNC(8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_FUNC(, 10, avx)
IDCT_ADD_FUNC(8_dc, 10, avx)
IDCT_ADD_FUNC(8, 10, avx)
#endif
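/*
 * Each invocation above declares one assembler-implemented prototype by
 * token pasting; e.g. IDCT_ADD_FUNC(8_dc, 10, sse2) expands to
 *
 *     void ff_h264_idct8_dc_add_10_sse2(uint8_t *dst, int16_t *block, int stride);
 */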
#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, const int *block_offset, \
     DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
IDCT_ADD_REP_FUNC(8, 4, 8, mmx2)
IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, avx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16, 8, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC(, 16, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
#endif
#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
    (uint8_t **dst, const int *block_offset, \
     DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
IDCT_ADD_REP_FUNC2(, 8, 8, mmx2)
IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
#if HAVE_AVX
IDCT_ADD_REP_FUNC2(, 8, 10, avx)
#endif
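/* Unlike IDCT_ADD_REP_FUNC, these take uint8_t **dst: the idct_add8
 * functions write to both chroma planes, so the destination is an array
 * of plane pointers rather than a single pointer. */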
void ff_h264_luma_dc_dequant_idct_mmx (DCTELEM *output, DCTELEM *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);
/***********************************/
/* deblocking */

#define h264_loop_filter_strength_iteration_mmx2(bS, nz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
                __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
                ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd      %a3(%0,%2), %%mm2 \n" \
                        "punpckldq %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
                        \
                        "por %%mm1, %%mm0 \n" \
                        "movq %a5(%1,%2,4), %%mm1 \n" \
                        "movq %a6(%1,%2,4), %%mm2 \n" \
                        "movq %%mm1, %%mm3 \n" \
                        "movq %%mm2, %%mm4 \n" \
                        "psubw  48(%1,%2,4), %%mm1 \n" \
                        "psubw  56(%1,%2,4), %%mm2 \n" \
                        "psubw 208(%1,%2,4), %%mm3 \n" \
                        "psubw 216(%1,%2,4), %%mm4 \n" \
                        "packsswb %%mm2, %%mm1 \n" \
                        "packsswb %%mm4, %%mm3 \n" \
                        "paddb %%mm6, %%mm1 \n" \
                        "paddb %%mm6, %%mm3 \n" \
                        "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb %%mm5, %%mm3 \n" \
                        "packsswb %%mm3, %%mm1 \n" \
                        \
                        "por %%mm1, %%mm0 \n" \
                        "movq %a7(%1,%2,4), %%mm1 \n" \
                        "movq %a8(%1,%2,4), %%mm2 \n" \
                        "movq %%mm1, %%mm3 \n" \
                        "movq %%mm2, %%mm4 \n" \
                        "psubw  48(%1,%2,4), %%mm1 \n" \
                        "psubw  56(%1,%2,4), %%mm2 \n" \
                        "psubw 208(%1,%2,4), %%mm3 \n" \
                        "psubw 216(%1,%2,4), %%mm4 \n" \
                        "packsswb %%mm2, %%mm1 \n" \
                        "packsswb %%mm4, %%mm3 \n" \
                        "paddb %%mm6, %%mm1 \n" \
                        "paddb %%mm6, %%mm3 \n" \
                        "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb %%mm5, %%mm3 \n" \
                        "packsswb %%mm3, %%mm1 \n" \
                        \
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd 12(%0,%2), %%mm0 \n" \
                        "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq 48(%1,%2,4), %%mm1 \n" \
                        "movq 56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb %%mm2, %%mm1 \n" \
                        "paddb %%mm6, %%mm1 \n" \
                        "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb %%mm1, %%mm1 \n" \
                        "por %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub    %%mm7, %%mm1 \n" \
                "pminub    %%mm7, %%mm0 \n" \
                "psllw     $1,    %%mm1 \n" \
                "pxor      %%mm2, %%mm2 \n" \
                "pmaxub    %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq      %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)
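/*
 * Roughly, in scalar terms, each pass of the loop above computes for a
 * group of four edge positions (bidir and field handling omitted):
 *
 *     bs = 2  if nnz[b] || nnz[bn]
 *     bs = 1  else if ref[b] != ref[bn]
 *                  or |mv[b][0]-mv[bn][0]| >= 4 or |mv[b][1]-mv[bn][1]| >= 4
 *     bs = 0  otherwise
 *
 * The paddb/psubusb pair is the |delta| >= limit test done with byte
 * saturation: after packsswb, adding the bias in %%mm6 and then
 * subtracting the doubled bias in %%mm5 with unsigned saturation leaves a
 * nonzero byte exactly when |delta| >= bias+1 (4 in frame mode; 2 for
 * vertical deltas in field mode via ff_pb_3_1).
 */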
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40],
                                            int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step,
                                            int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,    32,    8, mask_mv0, 0, -1, -1);

    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
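/* The strengths produced by the second (dir == 0) pass land transposed
 * relative to the layout the caller expects in bS[0], hence the in-place
 * TRANSPOSE4 at the end of the function above. */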
#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta);

#define LF_FUNCS(type, depth)\
LF_FUNC (h, chroma,       depth, mmxext)\
LF_IFUNC(h, chroma_intra, depth, mmxext)\
LF_FUNC (v, chroma,       depth, mmxext)\
LF_IFUNC(v, chroma_intra, depth, mmxext)\
LF_FUNC (h, luma,         depth, mmxext)\
LF_IFUNC(h, luma_intra,   depth, mmxext)\
LF_FUNC (h, luma,         depth, sse2)\
LF_IFUNC(h, luma_intra,   depth, sse2)\
LF_FUNC (v, luma,         depth, sse2)\
LF_IFUNC(v, luma_intra,   depth, sse2)\
LF_FUNC (h, chroma,       depth, sse2)\
LF_IFUNC(h, chroma_intra, depth, sse2)\
LF_FUNC (v, chroma,       depth, sse2)\
LF_IFUNC(v, chroma_intra, depth, sse2)\
LF_FUNC (h, luma,         depth, avx)\
LF_IFUNC(h, luma_intra,   depth, avx)\
LF_FUNC (v, luma,         depth, avx)\
LF_IFUNC(v, luma_intra,   depth, avx)\
LF_FUNC (h, chroma,       depth, avx)\
LF_IFUNC(h, chroma_intra, depth, avx)\
LF_FUNC (v, chroma,       depth, avx)\
LF_IFUNC(v, chroma_intra, depth, avx)

LF_FUNCS( uint8_t,  8)
LF_FUNCS(uint16_t, 10)
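/* On x86-32 the mmxext luma deblock core only handles 8 pixels per call,
 * so the wrappers below cover a 16-pixel luma edge with two calls.  The
 * (tc0[0] & tc0[1]) >= 0 test skips a half only when both tc0 entries are
 * -1 (i.e. no filtering), the sole combination whose bitwise AND stays
 * negative for tc0 values in {-1, 0, 1, ...}. */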
#if ARCH_X86_32
LF_FUNC (v8, luma, 8, mmxext)
static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra, 8, mmxext)
static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_deblock_v8_luma_intra_8_mmxext(pix+0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmxext(pix+8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 */

LF_FUNC (v, luma, 10, mmxext)
LF_IFUNC(v, luma_intra, 10, mmxext)
/***********************************/
/* weighted prediction */

#define H264_WEIGHT(W, OPT) \
void ff_h264_weight_ ## W ## _ ## OPT(uint8_t *dst, \
    int stride, int height, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, OPT) \
void ff_h264_biweight_ ## W ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int height, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W) \
H264_WEIGHT  (W, mmx2) \
H264_BIWEIGHT(W, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W) \
H264_BIWEIGHT_MMX(W) \
H264_WEIGHT      (W, sse2) \
H264_BIWEIGHT    (W, sse2) \
H264_BIWEIGHT    (W, ssse3)

H264_BIWEIGHT_MMX_SSE(16)
H264_BIWEIGHT_MMX_SSE( 8)
H264_BIWEIGHT_MMX    ( 4)

#define H264_WEIGHT_10(W, DEPTH, OPT) \
void ff_h264_weight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
    int stride, int height, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT_10(W, DEPTH, OPT) \
void ff_h264_biweight_ ## W ## _ ## DEPTH ## _ ## OPT \
    (uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, \
     int weightd, int weights, int offset);

#define H264_BIWEIGHT_10_SSE(W, DEPTH) \
H264_WEIGHT_10  (W, DEPTH, sse2) \
H264_WEIGHT_10  (W, DEPTH, sse4) \
H264_BIWEIGHT_10(W, DEPTH, sse2) \
H264_BIWEIGHT_10(W, DEPTH, sse4)

H264_BIWEIGHT_10_SSE(16, 10)
H264_BIWEIGHT_10_SSE( 8, 10)
H264_BIWEIGHT_10_SSE( 4, 10)
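/*
 * Like the IDCT declarations, these macros only emit prototypes; e.g.
 * H264_BIWEIGHT_10_SSE(16, 10) declares
 *
 *     ff_h264_weight_16_10_sse2,   ff_h264_weight_16_10_sse4,
 *     ff_h264_biweight_16_10_sse2, ff_h264_biweight_16_10_sse4
 */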
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
{
    int mm_flags = av_get_cpu_flags();

    if (chroma_format_idc == 1 && mm_flags & AV_CPU_FLAG_MMX2) {
        c->h264_loop_filter_strength = h264_loop_filter_strength_mmx2;
    }

    if (bit_depth == 8) {
#if HAVE_YASM
        if (mm_flags & AV_CPU_FLAG_MMX) {
            c->h264_idct_dc_add  =
            c->h264_idct_add     = ff_h264_idct_add_8_mmx;
            c->h264_idct8_dc_add =
            c->h264_idct8_add    = ff_h264_idct8_add_8_mmx;

            c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
            c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
            if (chroma_format_idc == 1)
                c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
            c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
            if (mm_flags & AV_CPU_FLAG_CMOV)
                c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_mmx;

            if (mm_flags & AV_CPU_FLAG_MMX2) {
                c->h264_idct_dc_add  = ff_h264_idct_dc_add_8_mmx2;
                c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmx2;
                c->h264_idct_add16   = ff_h264_idct_add16_8_mmx2;
                c->h264_idct8_add4   = ff_h264_idct8_add4_8_mmx2;
                if (chroma_format_idc == 1)
                    c->h264_idct_add8 = ff_h264_idct_add8_8_mmx2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx2;

                c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_8_mmxext;
                c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_mmxext;
                if (chroma_format_idc == 1) {
                    c->h264_h_loop_filter_chroma       = ff_deblock_h_chroma_8_mmxext;
                    c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmxext;
                }
#if ARCH_X86_32
                c->h264_v_loop_filter_luma       = ff_deblock_v_luma_8_mmxext;
                c->h264_h_loop_filter_luma       = ff_deblock_h_luma_8_mmxext;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif
                c->weight_h264_pixels_tab[0] = ff_h264_weight_16_mmx2;
                c->weight_h264_pixels_tab[1] = ff_h264_weight_8_mmx2;
                c->weight_h264_pixels_tab[2] = ff_h264_weight_4_mmx2;

                c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_mmx2;
                c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_mmx2;
                c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_mmx2;

                if (mm_flags & AV_CPU_FLAG_SSE2) {
                    c->h264_idct8_add  = ff_h264_idct8_add_8_sse2;

                    c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
                    c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
                    if (chroma_format_idc == 1)
                        c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
                    c->h264_idct_add16intra      = ff_h264_idct_add16intra_8_sse2;
                    c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_sse2;

                    c->weight_h264_pixels_tab[0] = ff_h264_weight_16_sse2;
                    c->weight_h264_pixels_tab[1] = ff_h264_weight_8_sse2;

                    c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_sse2;
                    c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_sse2;

#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma       = ff_deblock_v_luma_8_sse2;
                    c->h264_h_loop_filter_luma       = ff_deblock_h_luma_8_sse2;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
#endif
                }
                if (mm_flags & AV_CPU_FLAG_SSSE3) {
                    c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_ssse3;
                    c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_ssse3;
                }
                if (mm_flags & AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma       = ff_deblock_v_luma_8_avx;
                    c->h264_h_loop_filter_luma       = ff_deblock_h_luma_8_avx;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
#endif
                }
            }
        }
#endif
    } else if (bit_depth == 10) {
#if HAVE_YASM
        if (mm_flags & AV_CPU_FLAG_MMX) {
            if (mm_flags & AV_CPU_FLAG_MMX2) {
#if ARCH_X86_32
                c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_10_mmxext;
                c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_mmxext;
                c->h264_v_loop_filter_luma         = ff_deblock_v_luma_10_mmxext;
                c->h264_h_loop_filter_luma         = ff_deblock_h_luma_10_mmxext;
                c->h264_v_loop_filter_luma_intra   = ff_deblock_v_luma_intra_10_mmxext;
                c->h264_h_loop_filter_luma_intra   = ff_deblock_h_luma_intra_10_mmxext;
#endif
                c->h264_idct_dc_add = ff_h264_idct_dc_add_10_mmx2;

                if (mm_flags & AV_CPU_FLAG_SSE2) {
                    c->h264_idct_add     = ff_h264_idct_add_10_sse2;
                    c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;

                    c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
                    if (chroma_format_idc == 1)
                        c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
                    c->h264_idct_add16intra = ff_h264_idct_add16intra_10_sse2;
#if HAVE_ALIGNED_STACK
                    c->h264_idct8_add  = ff_h264_idct8_add_10_sse2;
                    c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
#endif
                    c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse2;
                    c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse2;
                    c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse2;

                    c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
                    c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
                    c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse2;

                    c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_10_sse2;
                    c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_sse2;
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma       = ff_deblock_v_luma_10_sse2;
                    c->h264_h_loop_filter_luma       = ff_deblock_h_luma_10_sse2;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
#endif
                }
                if (mm_flags & AV_CPU_FLAG_SSE4) {
                    c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse4;
                    c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse4;
                    c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse4;

                    c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
                    c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
                    c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
                }
#if HAVE_AVX
                if (mm_flags & AV_CPU_FLAG_AVX) {
                    c->h264_idct_dc_add  =
                    c->h264_idct_add     = ff_h264_idct_add_10_avx;
                    c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;

                    c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
                    if (chroma_format_idc == 1)
                        c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
                    c->h264_idct_add16intra = ff_h264_idct_add16intra_10_avx;
#if HAVE_ALIGNED_STACK
                    c->h264_idct8_add  = ff_h264_idct8_add_10_avx;
                    c->h264_idct8_add4 = ff_h264_idct8_add4_10_avx;
#endif
                    c->h264_v_loop_filter_chroma       = ff_deblock_v_chroma_10_avx;
                    c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_avx;
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma       = ff_deblock_v_luma_10_avx;
                    c->h264_h_loop_filter_luma       = ff_deblock_h_luma_10_avx;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif
                }
#endif /* HAVE_AVX */
            }
        }
#endif
    }
}
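/*
 * A minimal sketch of the intended call site (in Libav the generic
 * dispatcher lives in libavcodec/h264dsp.c; exact guard may differ):
 *
 *     if (ARCH_X86)
 *         ff_h264dsp_init_x86(c, bit_depth, chroma_format_idc);
 *
 * The function pointers installed above are then what the decoder calls,
 * with the C implementations left in place wherever no x86 version
 * qualified for the detected CPU flags.
 */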