* commit '7f75f2f2bd692857c1c1ca7f414eb30ece3de93d':
    ppc: Drop unnecessary ff_ name prefixes from static functions
    x86: Drop unnecessary ff_ name prefixes from static functions
    arm: Drop unnecessary ff_ name prefixes from static functions

Merged-by: Michael Niedermayer <michaelni@gmx.at>
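Background on the convention these patches enforce: FFmpeg reserves the ff_ prefix for internal symbols that are visible across object files, so that library-wide names stay in one namespace. A function declared static has internal linkage and can never collide with a name outside its own translation unit, which makes the prefix pure noise there. A minimal sketch of the rule, using made-up names (nothing below comes from the patch itself):

    /* Visible to other object files, typically declared in a header:
     * keeps the ff_ namespace prefix. */
    void ff_exampledsp_init(ExampleDSPContext *c);

    /* static: internal linkage, invisible to the linker outside this
     * file, so the ff_ prefix is dropped. */
    static void exampledsp_init_neon(ExampleDSPContext *c)
    {
        /* assign NEON implementations to the context's function pointers */
    }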
@@ -68,8 +68,8 @@ void ff_h264_idct8_add4_neon(uint8_t *dst, const int *block_offset,
                              int16_t *block, int stride,
                              const uint8_t nnzc[6*8]);
 
-static av_cold void ff_h264dsp_init_neon(H264DSPContext *c, const int bit_depth,
-                                         const int chroma_format_idc)
+static av_cold void h264dsp_init_neon(H264DSPContext *c, const int bit_depth,
+                                      const int chroma_format_idc)
 {
 #if HAVE_NEON
     if (bit_depth == 8) {
@@ -107,5 +107,5 @@ av_cold void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth,
     int cpu_flags = av_get_cpu_flags();
 
     if (have_neon(cpu_flags))
-        ff_h264dsp_init_neon(c, bit_depth, chroma_format_idc);
+        h264dsp_init_neon(c, bit_depth, chroma_format_idc);
 }
@@ -45,9 +45,9 @@ void ff_pred8x8_0lt_dc_neon(uint8_t *src, ptrdiff_t stride);
 void ff_pred8x8_l00_dc_neon(uint8_t *src, ptrdiff_t stride);
 void ff_pred8x8_0l0_dc_neon(uint8_t *src, ptrdiff_t stride);
 
-static av_cold void ff_h264_pred_init_neon(H264PredContext *h, int codec_id,
-                                           const int bit_depth,
-                                           const int chroma_format_idc)
+static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
+                                        const int bit_depth,
+                                        const int chroma_format_idc)
 {
 #if HAVE_NEON
     const int high_depth = bit_depth > 8;
@@ -88,5 +88,5 @@ av_cold void ff_h264_pred_init_arm(H264PredContext *h, int codec_id,
     int cpu_flags = av_get_cpu_flags();
 
     if (have_neon(cpu_flags))
-        ff_h264_pred_init_neon(h, codec_id, bit_depth, chroma_format_idc);
+        h264_pred_init_neon(h, codec_id, bit_depth, chroma_format_idc);
 }
@@ -70,7 +70,7 @@ void ff_rv40_v_weak_loop_filter_neon(uint8_t *src, ptrdiff_t stride, int filter_
                                      int filter_q1, int alpha, int beta,
                                      int lim_p0q0, int lim_q1, int lim_p1);
 
-static av_cold void ff_rv40dsp_init_neon(RV34DSPContext *c)
+static av_cold void rv40dsp_init_neon(RV34DSPContext *c)
 {
     c->put_pixels_tab[0][ 1] = ff_put_rv40_qpel16_mc10_neon;
     c->put_pixels_tab[0][ 3] = ff_put_rv40_qpel16_mc30_neon;
@@ -144,5 +144,5 @@ av_cold void ff_rv40dsp_init_arm(RV34DSPContext *c)
     int cpu_flags = av_get_cpu_flags();
 
     if (have_neon(cpu_flags))
-        ff_rv40dsp_init_neon(c);
+        rv40dsp_init_neon(c);
 }
@@ -38,7 +38,7 @@ void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_interleave_altivec(FFTContext *s, FFTComplex *z);
 
 #if HAVE_GNU_AS && HAVE_ALTIVEC
-static void ff_imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
 {
     int j, k;
     int n = 1 << s->mdct_bits;
@@ -118,7 +118,7 @@ static void ff_imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSam
     } while(k >= 0);
 }
 
-static void ff_imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+static void imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
 {
     int k;
     int n = 1 << s->mdct_bits;
@@ -128,7 +128,7 @@ static void ff_imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSam
     vec_u32 *p0 = (vec_u32*)(output+n4);
     vec_u32 *p1 = (vec_u32*)(output+n4*3);
 
-    ff_imdct_half_altivec(s, output+n4, input);
+    imdct_half_altivec(s, output + n4, input);
 
     for (k = 0; k < n16; k++) {
         vec_u32 a = p0[k] ^ sign;
@@ -144,8 +144,8 @@ av_cold void ff_fft_init_ppc(FFTContext *s)
 #if HAVE_GNU_AS && HAVE_ALTIVEC
     s->fft_calc = ff_fft_calc_interleave_altivec;
     if (s->mdct_bits >= 5) {
-        s->imdct_calc = ff_imdct_calc_altivec;
-        s->imdct_half = ff_imdct_half_altivec;
+        s->imdct_calc = imdct_calc_altivec;
+        s->imdct_half = imdct_half_altivec;
     }
 #endif /* HAVE_GNU_AS && HAVE_ALTIVEC */
 }
@@ -70,7 +70,7 @@
     va_u32 = vec_splat((vec_u32)va_u8, 0);      \
     vec_ste(va_u32, element, (uint32_t*)dst);
 
-static void ff_h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
+static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
 {
     vec_s16 va0, va1, va2, va3;
     vec_s16 vz0, vz1, vz2, vz3;
@@ -185,7 +185,8 @@ static void ff_h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
     vec_st( hv, 0, dest );      \
  }
 
-static void ff_h264_idct8_add_altivec( uint8_t *dst, int16_t *dct, int stride ) {
+static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
+{
     vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
     vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
     vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
@@ -281,47 +282,59 @@ static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
     h264_idct_dc_add_internal(dst, block, stride, 4);
 }
 
-static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
+static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
 {
     h264_idct_dc_add_internal(dst, block, stride, 8);
 }
 
-static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
+static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
+                                    int16_t *block, int stride,
+                                    const uint8_t nnzc[15 * 8])
+{
     int i;
     for(i=0; i<16; i++){
         int nnz = nnzc[ scan8[i] ];
         if(nnz){
             if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
-            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
+            else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
         }
     }
 }
 
-static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
+static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
+                                         int16_t *block, int stride,
+                                         const uint8_t nnzc[15 * 8])
+{
     int i;
     for(i=0; i<16; i++){
-        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
+        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
         else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
     }
 }
 
-static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
+static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
+                                    int16_t *block, int stride,
+                                    const uint8_t nnzc[15 * 8])
+{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
-            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
-            else                      ff_h264_idct8_add_altivec (dst + block_offset[i], block + i*16, stride);
+            if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
+            else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
 }
 
-static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
+static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
+                                   int16_t *block, int stride,
+                                   const uint8_t nnzc[15 * 8])
+{
     int i, j;
     for (j = 1; j < 3; j++) {
         for(i = j * 16; i < j * 16 + 4; i++){
             if(nnzc[ scan8[i] ])
-                ff_h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
+                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
             else if(block[i*16])
                 h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
         }
@@ -713,12 +726,14 @@ void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
 }
 
 #define H264_WEIGHT(W) \
-static void ff_weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
-                                                   int log2_denom, int weight, int offset){ \
+static void weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
+                                                int log2_denom, int weight, int offset) \
+{ \
     weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
 }\
-static void ff_biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
-                                                     int log2_denom, int weightd, int weights, int offset){ \
+static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
+                                                  int log2_denom, int weightd, int weights, int offset) \
+{ \
     biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
 }
@@ -732,22 +747,22 @@ av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
 #if HAVE_ALTIVEC
     if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
         if (bit_depth == 8) {
-            c->h264_idct_add = ff_h264_idct_add_altivec;
+            c->h264_idct_add = h264_idct_add_altivec;
             if (chroma_format_idc == 1)
-                c->h264_idct_add8 = ff_h264_idct_add8_altivec;
-            c->h264_idct_add16 = ff_h264_idct_add16_altivec;
-            c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
+                c->h264_idct_add8 = h264_idct_add8_altivec;
+            c->h264_idct_add16 = h264_idct_add16_altivec;
+            c->h264_idct_add16intra = h264_idct_add16intra_altivec;
             c->h264_idct_dc_add= h264_idct_dc_add_altivec;
-            c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
-            c->h264_idct8_add = ff_h264_idct8_add_altivec;
-            c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
+            c->h264_idct8_dc_add = h264_idct8_dc_add_altivec;
+            c->h264_idct8_add = h264_idct8_add_altivec;
+            c->h264_idct8_add4 = h264_idct8_add4_altivec;
             c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
             c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;
-            c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16_altivec;
-            c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels8_altivec;
-            c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16_altivec;
-            c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels8_altivec;
+            c->weight_h264_pixels_tab[0] = weight_h264_pixels16_altivec;
+            c->weight_h264_pixels_tab[1] = weight_h264_pixels8_altivec;
+            c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
+            c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
         }
     }
 #endif /* HAVE_ALTIVEC */
@@ -413,22 +413,22 @@ static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstS
 }\
 
 #define CAVS_MC(OPNAME, SIZE, MMX) \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
 }\
 \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
 }\
 \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
 }\
 \
-static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
+static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
 {\
     OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
 }\
@@ -459,11 +459,11 @@ static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
 }
 #endif /* HAVE_MMX_INLINE */
 
-#define DSPFUNC(PFX, IDX, NUM, EXT) \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
-    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = ff_ ## PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT; \
+#define DSPFUNC(PFX, IDX, NUM, EXT) \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
+    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT; \
 
 #if HAVE_MMXEXT_INLINE
 QPEL_CAVS(put_, PUT_OP, mmxext)
@@ -474,8 +474,8 @@ CAVS_MC(put_, 16, mmxext)
 CAVS_MC(avg_, 8, mmxext)
 CAVS_MC(avg_, 16, mmxext)
 
-static av_cold void ff_cavsdsp_init_mmxext(CAVSDSPContext *c,
-                                           AVCodecContext *avctx)
+static av_cold void cavsdsp_init_mmxext(CAVSDSPContext *c,
+                                        AVCodecContext *avctx)
 {
     DSPFUNC(put, 0, 16, mmxext);
     DSPFUNC(put, 1, 8, mmxext);
@@ -493,8 +493,8 @@ CAVS_MC(put_, 16,3dnow)
 CAVS_MC(avg_, 8, 3dnow)
 CAVS_MC(avg_, 16,3dnow)
 
-static av_cold void ff_cavsdsp_init_3dnow(CAVSDSPContext *c,
-                                          AVCodecContext *avctx)
+static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
+                                       AVCodecContext *avctx)
 {
     DSPFUNC(put, 0, 16, 3dnow);
     DSPFUNC(put, 1, 8, 3dnow);
@@ -512,9 +512,11 @@ av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
     cavsdsp_init_mmx(c, avctx);
 #endif /* HAVE_MMX_INLINE */
 #if HAVE_MMXEXT_INLINE
-    if (mm_flags & AV_CPU_FLAG_MMXEXT) ff_cavsdsp_init_mmxext(c, avctx);
+    if (mm_flags & AV_CPU_FLAG_MMXEXT)
+        cavsdsp_init_mmxext(c, avctx);
 #endif /* HAVE_MMXEXT_INLINE */
 #if HAVE_AMD3DNOW_INLINE
-    if (mm_flags & AV_CPU_FLAG_3DNOW) ff_cavsdsp_init_3dnow(c, avctx);
+    if (mm_flags & AV_CPU_FLAG_3DNOW)
+        cavsdsp_init_3dnow(c, avctx);
 #endif /* HAVE_AMD3DNOW_INLINE */
 }
@@ -206,7 +206,12 @@ static av_always_inline void ff_ ## OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint
     ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
 }
 
-static av_always_inline void ff_put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
+static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp,
+                                                                 uint8_t *src,
+                                                                 int tmpStride,
+                                                                 int srcStride,
+                                                                 int size)
+{
     int w = (size+8)>>3;
     src -= 2*srcStride+2;
     while(w--){
@@ -218,7 +223,7 @@ static av_always_inline void ff_put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp
 
 #define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
 static av_always_inline void ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
-    ff_put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
+    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
     ff_ ## OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
 }\
 static av_always_inline void ff_ ## OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
@@ -132,8 +132,8 @@ LF_FUNCS(uint16_t, 10)
 
 #if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
 LF_FUNC(v8, luma, 8, mmxext)
-static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha,
-                                       int beta, int8_t *tc0)
+static void deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha,
+                                    int beta, int8_t *tc0)
 {
     if ((tc0[0] & tc0[1]) >= 0)
         ff_deblock_v8_luma_8_mmxext(pix + 0, stride, alpha, beta, tc0);
@@ -141,8 +141,8 @@ static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha,
         ff_deblock_v8_luma_8_mmxext(pix + 8, stride, alpha, beta, tc0 + 2);
 }
 LF_IFUNC(v8, luma_intra, 8, mmxext)
-static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride,
-                                             int alpha, int beta)
+static void deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride,
+                                          int alpha, int beta)
 {
     ff_deblock_v8_luma_intra_8_mmxext(pix + 0, stride, alpha, beta);
     ff_deblock_v8_luma_intra_8_mmxext(pix + 8, stride, alpha, beta);
@@ -248,9 +248,9 @@ av_cold void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
             c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmxext;
         }
 #if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
-        c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_mmxext;
+        c->h264_v_loop_filter_luma = deblock_v_luma_8_mmxext;
         c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_mmxext;
-        c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
+        c->h264_v_loop_filter_luma_intra = deblock_v_luma_intra_8_mmxext;
         c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
 #endif /* ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL */
         c->weight_h264_pixels_tab[0] = ff_h264_weight_16_mmxext;
@@ -25,52 +25,52 @@
  */
 
 //FIXME the following could be optimized too ...
-static void DEF(ff_put_no_rnd_pixels16_x2)(uint8_t *block,
-                                           const uint8_t *pixels,
-                                           ptrdiff_t line_size, int h)
+static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block,
+                                        const uint8_t *pixels,
+                                        ptrdiff_t line_size, int h)
 {
     DEF(ff_put_no_rnd_pixels8_x2)(block, pixels, line_size, h);
     DEF(ff_put_no_rnd_pixels8_x2)(block + 8, pixels + 8, line_size, h);
 }
 
-static void DEF(ff_put_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
-                                    ptrdiff_t line_size, int h)
+static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
+                                 ptrdiff_t line_size, int h)
 {
     DEF(ff_put_pixels8_y2)(block, pixels, line_size, h);
     DEF(ff_put_pixels8_y2)(block + 8, pixels + 8, line_size, h);
 }
 
-static void DEF(ff_put_no_rnd_pixels16_y2)(uint8_t *block,
-                                           const uint8_t *pixels,
-                                           ptrdiff_t line_size, int h)
+static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block,
+                                        const uint8_t *pixels,
+                                        ptrdiff_t line_size, int h)
 {
     DEF(ff_put_no_rnd_pixels8_y2)(block, pixels, line_size, h);
     DEF(ff_put_no_rnd_pixels8_y2)(block + 8, pixels + 8, line_size, h);
 }
 
-static void DEF(ff_avg_pixels16)(uint8_t *block, const uint8_t *pixels,
-                                 ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels,
+                              ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8)(block + 8, pixels + 8, line_size, h);
 }
 
-static void DEF(ff_avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels,
-                                    ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels,
+                                 ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8_x2)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8_x2)(block + 8, pixels + 8, line_size, h);
 }
 
-static void DEF(ff_avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
-                                    ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels,
+                                 ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8_y2)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8_y2)(block + 8, pixels + 8, line_size, h);
 }
 
-static void DEF(ff_avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels,
-                                     ptrdiff_t line_size, int h)
+static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels,
+                                  ptrdiff_t line_size, int h)
 {
     DEF(ff_avg_pixels8_xy2)(block, pixels, line_size, h);
     DEF(ff_avg_pixels8_xy2)(block + 8, pixels + 8, line_size, h);
@@ -226,11 +226,11 @@ static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
 {
 #if HAVE_YASM
     c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
-    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_mmxext;
+    c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;
 
-    c->avg_pixels_tab[0][0] = ff_avg_pixels16_mmxext;
-    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_mmxext;
-    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_mmxext;
+    c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
+    c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
+    c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;
 
     c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
     c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;
@@ -240,12 +240,12 @@ static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int mm_flags)
     c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;
 
     if (!(flags & CODEC_FLAG_BITEXACT)) {
-        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_mmxext;
-        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_mmxext;
+        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
+        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
         c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
         c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;
 
-        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_mmxext;
+        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
         c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
     }
@@ -260,11 +260,11 @@ static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
 {
 #if HAVE_YASM
     c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
-    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_3dnow;
+    c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
 
-    c->avg_pixels_tab[0][0] = ff_avg_pixels16_3dnow;
-    c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_3dnow;
-    c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_3dnow;
+    c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
+    c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
+    c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
 
     c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
     c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;
@@ -274,12 +274,12 @@ static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int mm_flags)
     c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;
 
     if (!(flags & CODEC_FLAG_BITEXACT)){
-        c->put_no_rnd_pixels_tab[0][1] = ff_put_no_rnd_pixels16_x2_3dnow;
-        c->put_no_rnd_pixels_tab[0][2] = ff_put_no_rnd_pixels16_y2_3dnow;
+        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
+        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
         c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
         c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;
 
-        c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_3dnow;
+        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
         c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
     }
@@ -620,18 +620,18 @@ av_cold void ff_yuv2rgb_init_tables_altivec(SwsContext *c,
     return;
 }
 
-static av_always_inline void ff_yuv2packedX_altivec(SwsContext *c,
-                                                    const int16_t *lumFilter,
-                                                    const int16_t **lumSrc,
-                                                    int lumFilterSize,
-                                                    const int16_t *chrFilter,
-                                                    const int16_t **chrUSrc,
-                                                    const int16_t **chrVSrc,
-                                                    int chrFilterSize,
-                                                    const int16_t **alpSrc,
-                                                    uint8_t *dest,
-                                                    int dstW, int dstY,
-                                                    enum AVPixelFormat target)
+static av_always_inline void yuv2packedX_altivec(SwsContext *c,
+                                                 const int16_t *lumFilter,
+                                                 const int16_t **lumSrc,
+                                                 int lumFilterSize,
+                                                 const int16_t *chrFilter,
+                                                 const int16_t **chrUSrc,
+                                                 const int16_t **chrVSrc,
+                                                 int chrFilterSize,
+                                                 const int16_t **alpSrc,
+                                                 uint8_t *dest,
+                                                 int dstW, int dstY,
+                                                 enum AVPixelFormat target)
 {
     int i, j;
     vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
@@ -840,10 +840,10 @@ void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c, \
                                      const int16_t **alpSrc, \
                                      uint8_t *dest, int dstW, int dstY) \
 { \
-    ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize, \
-                           chrFilter, chrUSrc, chrVSrc, \
-                           chrFilterSize, alpSrc, \
-                           dest, dstW, dstY, pixfmt); \
+    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize, \
+                        chrFilter, chrUSrc, chrVSrc, \
+                        chrFilterSize, alpSrc, \
+                        dest, dstW, dstY, pixfmt); \
 }
 
 YUV2PACKEDX_WRAPPER(abgr, AV_PIX_FMT_ABGR);