/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"
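
/* Packed bytes {3,1,3,1,...}: loaded into mm6 in place of ff_pb_3 when
 * filtering field pictures (see h264_loop_filter_strength_mmx2() below),
 * presumably because the vertical mv-difference threshold is smaller for
 * field blocks. */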
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1) = 0x0103010301030103ULL;

/***********************************/
/* IDCT */

void ff_h264_idct_add_mmx     (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_mmx    (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_sse2   (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_dc_add_mmx2 (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride);

void ff_h264_idct_add16_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_sse2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx       (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx2      (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

void ff_h264_idct_add16_sse2     (uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_sse2      (uint8_t **dest, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);

void ff_h264_luma_dc_dequant_idct_mmx (DCTELEM *output, DCTELEM *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);

/***********************************/
/* deblocking */
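
/* Compute H.264 boundary strengths (bS) for one filtering direction: for each
 * 4-sample edge the strength is derived from the adjacent blocks'
 * non-zero-coefficient flags (nnz), reference-index differences and
 * motion-vector differences (thresholds in mm5/mm6, set up by the caller).
 * d_idx is the offset from a block to its neighbour across the edge; the four
 * strengths of each edge are stored into bS[dir]. */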
#define h264_loop_filter_strength_iteration_mmx2(bS, nz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
                __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
                ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd %a3(%0,%2), %%mm2 \n" \
                        "punpckldq %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
                        \
                        "por %%mm1, %%mm0 \n" \
                        "movq %a5(%1,%2,4), %%mm1 \n" \
                        "movq %a6(%1,%2,4), %%mm2 \n" \
                        "movq %%mm1, %%mm3 \n" \
                        "movq %%mm2, %%mm4 \n" \
                        "psubw 48(%1,%2,4), %%mm1 \n" \
                        "psubw 56(%1,%2,4), %%mm2 \n" \
                        "psubw 208(%1,%2,4), %%mm3 \n" \
                        "psubw 216(%1,%2,4), %%mm4 \n" \
                        "packsswb %%mm2, %%mm1 \n" \
                        "packsswb %%mm4, %%mm3 \n" \
                        "paddb %%mm6, %%mm1 \n" \
                        "paddb %%mm6, %%mm3 \n" \
                        "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb %%mm5, %%mm3 \n" \
                        "packsswb %%mm3, %%mm1 \n" \
                        \
                        "por %%mm1, %%mm0 \n" \
                        "movq %a7(%1,%2,4), %%mm1 \n" \
                        "movq %a8(%1,%2,4), %%mm2 \n" \
                        "movq %%mm1, %%mm3 \n" \
                        "movq %%mm2, %%mm4 \n" \
                        "psubw 48(%1,%2,4), %%mm1 \n" \
                        "psubw 56(%1,%2,4), %%mm2 \n" \
                        "psubw 208(%1,%2,4), %%mm3 \n" \
                        "psubw 216(%1,%2,4), %%mm4 \n" \
                        "packsswb %%mm2, %%mm1 \n" \
                        "packsswb %%mm4, %%mm3 \n" \
                        "paddb %%mm6, %%mm1 \n" \
                        "paddb %%mm6, %%mm3 \n" \
                        "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb %%mm5, %%mm3 \n" \
                        "packsswb %%mm3, %%mm1 \n" \
                        \
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd 12(%0,%2), %%mm0 \n" \
                        "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq 48(%1,%2,4), %%mm1 \n" \
                        "movq 56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb %%mm2, %%mm1 \n" \
                        "paddb %%mm6, %%mm1 \n" \
                        "psubusb %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb %%mm1, %%mm1 \n" \
                        "por %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nnz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub %%mm7, %%mm1 \n" \
                "pminub %%mm7, %%mm0 \n" \
                "psllw $1, %%mm1 \n" \
                "pxor %%mm2, %%mm2 \n" \
                "pmaxub %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)
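
/* Fill bS[2][4][4] with the boundary strengths for both filtering directions.
 * mm7 holds ff_pb_1, mm6 the mv-difference limit (ff_pb_3, or ff_pb_3_1 for
 * field pictures) and mm5 twice that limit; the iteration macro above consumes
 * them. bS[0] is transposed in place at the end. */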
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;

    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, 32,    8,    mask_mv0, 0, -1, -1);

    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
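
/* Prototype-generating macros for the external (assembly) deblocking filters:
 * LF_FUNC declares the tc0-based inter filters, LF_IFUNC the intra variants
 * that take only alpha and beta. */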
#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta);

#define LF_FUNCS(type, depth)\
LF_FUNC (h, chroma, depth, mmxext)\
LF_IFUNC(h, chroma_intra, depth, mmxext)\
LF_FUNC (v, chroma, depth, mmxext)\
LF_IFUNC(v, chroma_intra, depth, mmxext)\
LF_FUNC (h, luma, depth, mmxext)\
LF_IFUNC(h, luma_intra, depth, mmxext)\
LF_FUNC (h, luma, depth, sse2)\
LF_IFUNC(h, luma_intra, depth, sse2)\
LF_FUNC (v, luma, depth, sse2)\
LF_IFUNC(v, luma_intra, depth, sse2)\
LF_FUNC (h, chroma, depth, sse2)\
LF_IFUNC(h, chroma_intra, depth, sse2)\
LF_FUNC (v, chroma, depth, sse2)\
LF_IFUNC(v, chroma_intra, depth, sse2)\
LF_FUNC (h, luma, depth, avx)\
LF_IFUNC(h, luma_intra, depth, avx)\
LF_FUNC (v, luma, depth, avx)\
LF_IFUNC(v, luma_intra, depth, avx)\
LF_FUNC (h, chroma, depth, avx)\
LF_IFUNC(h, chroma_intra, depth, avx)\
LF_FUNC (v, chroma, depth, avx)\
LF_IFUNC(v, chroma_intra, depth, avx)

LF_FUNCS( uint8_t,  8)
LF_FUNCS(uint16_t, 10)

#if ARCH_X86_32
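/* On x86-32 the full-width (16 pixel) vertical luma filters are assembled from
 * two calls to 8-pixel-wide versions; a half is skipped when both of its tc0
 * values are negative (the bitwise AND is negative only if both are), i.e.
 * when filtering is disabled for that half. */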
LF_FUNC (v8, luma, 8, mmxext)
static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra, 8, mmxext)
static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_deblock_v8_luma_intra_8_mmxext(pix+0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmxext(pix+8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 */

LF_FUNC (v, luma, 10, mmxext)
LF_IFUNC(v, luma_intra, 10, mmxext)

/***********************************/
/* weighted prediction */
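
/* Prototype-generating macros for the external weighted-prediction functions:
 * H264_WEIGHT declares unidirectional weighting, H264_BIWEIGHT bidirectional
 * weighting, instantiated per block size and instruction set below. */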
#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)
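
/* Runtime dispatch: override the H264DSPContext function pointers with the
 * fastest x86 implementations available for the given bit depth, based on
 * av_get_cpu_flags(). A minimal sketch of the intended use (the generic
 * ff_h264dsp_init() in h264dsp.c is the real caller; its exact guard may
 * differ):
 *
 *     H264DSPContext c;
 *     ff_h264dsp_init(&c, 8);   // sets the C defaults ...
 *     // ... and, when built for x86, ends with something like:
 *     //     if (HAVE_MMX) ff_h264dsp_init_x86(&c, bit_depth);
 */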
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
{
    int mm_flags = av_get_cpu_flags();

    if (bit_depth == 8) {
        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
        }
#if HAVE_YASM
        if (mm_flags & AV_CPU_FLAG_MMX) {
            c->h264_idct_dc_add=
            c->h264_idct_add= ff_h264_idct_add_mmx;
            c->h264_idct8_dc_add=
            c->h264_idct8_add= ff_h264_idct8_add_mmx;

            c->h264_idct_add16     = ff_h264_idct_add16_mmx;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
            c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_mmx;

            if (mm_flags & AV_CPU_FLAG_MMX2) {
                c->h264_idct_dc_add = ff_h264_idct_dc_add_mmx2;
                c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
                c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
                c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
                c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
                c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmxext;
                c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmxext;
#if ARCH_X86_32
                c->h264_v_loop_filter_luma= ff_deblock_v_luma_8_mmxext;
                c->h264_h_loop_filter_luma= ff_deblock_h_luma_8_mmxext;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif /* ARCH_X86_32 */
                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
                c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
                c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
                c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
                c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
                c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
                c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

                if (mm_flags&AV_CPU_FLAG_SSE2) {
                    c->h264_idct8_add = ff_h264_idct8_add_sse2;
                    c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
                    c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;

                    c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                    c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                    c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                    c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                    c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                    c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                    c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                    c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                    c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                    c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
                    c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
#endif /* HAVE_ALIGNED_STACK */

                    c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                    c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                    c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
                }
                if (mm_flags&AV_CPU_FLAG_SSSE3) {
                    c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                    c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                    c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                    c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                    c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
                }
                if (mm_flags&AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
                    c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
#endif /* HAVE_ALIGNED_STACK */
                }
            }
        }
#endif /* HAVE_YASM */
    } else if (bit_depth == 10) {
#if HAVE_YASM
        if (mm_flags & AV_CPU_FLAG_MMX) {
            if (mm_flags & AV_CPU_FLAG_MMX2) {
#if ARCH_X86_32
                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_mmxext;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_mmxext;
                c->h264_v_loop_filter_luma= ff_deblock_v_luma_10_mmxext;
                c->h264_h_loop_filter_luma= ff_deblock_h_luma_10_mmxext;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
#endif /* ARCH_X86_32 */
                if (mm_flags&AV_CPU_FLAG_SSE2) {
                    c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
                    c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
                    c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
#endif /* HAVE_ALIGNED_STACK */
                }
                if (mm_flags&AV_CPU_FLAG_AVX) {
                    c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
                    c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
                    c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif /* HAVE_ALIGNED_STACK */
                }
            }
        }
#endif /* HAVE_YASM */
    }
}