The mmxext optimizations should be at least as fast where available, and the amd3dnow optimizations are being deprecated. Thus the former should override the latter, not the other way around.
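Why the order matters: these init functions all fill the same table of function pointers, and each block unconditionally overwrites whatever an earlier block installed, so the last block whose guard passes wins. Below is a minimal sketch of that pattern; the struct and function names are hypothetical stand-ins, not the actual FFmpeg code.

#include <stdio.h>

/* Hypothetical stand-in for a DSP context full of function pointers. */
typedef struct DSPCtx {
    void (*put_pixels)(void);
} DSPCtx;

static void put_pixels_3dnow(void)  { puts("3dnow version"); }
static void put_pixels_mmxext(void) { puts("mmxext version"); }

static void init_3dnow(DSPCtx *c)  { c->put_pixels = put_pixels_3dnow; }
static void init_mmxext(DSPCtx *c) { c->put_pixels = put_pixels_mmxext; }

int main(void)
{
    DSPCtx c = { 0 };

    /* Old order: mmxext first, 3dnow second -> 3dnow silently wins. */
    init_mmxext(&c);
    init_3dnow(&c);
    c.put_pixels();              /* prints "3dnow version" */

    /* New order: 3dnow first, mmxext second -> mmxext wins. */
    init_3dnow(&c);
    init_mmxext(&c);
    c.put_pixels();              /* prints "mmxext version" */
    return 0;
}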
--- a/libavcodec/x86/cavsdsp.c
+++ b/libavcodec/x86/cavsdsp.c
@@ -547,12 +547,12 @@ av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
     if (INLINE_MMX(cpu_flags))
         cavsdsp_init_mmx(c, avctx);
 #endif /* HAVE_MMX_INLINE */
-#if HAVE_MMXEXT_INLINE
-    if (INLINE_MMXEXT(cpu_flags))
-        cavsdsp_init_mmxext(c, avctx);
-#endif /* HAVE_MMXEXT_INLINE */
 #if HAVE_AMD3DNOW_INLINE
     if (INLINE_AMD3DNOW(cpu_flags))
         cavsdsp_init_3dnow(c, avctx);
 #endif /* HAVE_AMD3DNOW_INLINE */
+#if HAVE_MMXEXT_INLINE
+    if (INLINE_MMXEXT(cpu_flags))
+        cavsdsp_init_mmxext(c, avctx);
+#endif /* HAVE_MMXEXT_INLINE */
 }
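Each block in the hunk above is gated twice: at compile time by a configure-generated HAVE_*_INLINE macro, and at run time by the detected CPU flags. A simplified sketch of that double gate, continuing the types from the sketch above; the macro bodies and flag value here are illustrative stand-ins, not copies of libavutil/x86/cpu.h.

/* Compile-time gate: normally emitted by configure. */
#define HAVE_MMXEXT_INLINE 1
/* Runtime CPU capability bit (illustrative value). */
#define AV_CPU_FLAG_MMXEXT 0x0002
/* Combined check: code was built in AND the CPU supports it. */
#define INLINE_MMXEXT(flags) \
    (HAVE_MMXEXT_INLINE && ((flags) & AV_CPU_FLAG_MMXEXT))

static void dsp_init_x86(DSPCtx *c, int cpu_flags)
{
#if HAVE_MMXEXT_INLINE
    if (INLINE_MMXEXT(cpu_flags))
        init_mmxext(c);   /* reuses init_mmxext() from the sketch above */
#endif
}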
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -991,6 +991,13 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
             c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
     }
 
+    if (INLINE_AMD3DNOW(cpu_flags)) {
+        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
+            c->try_8x8basis = try_8x8basis_3dnow;
+        }
+        c->add_8x8basis = add_8x8basis_3dnow;
+    }
+
     if (INLINE_MMXEXT(cpu_flags)) {
         if (avctx->bits_per_raw_sample <= 8 &&
             (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX))
@@ -1023,13 +1030,6 @@ av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
             c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
     }
 #endif
-
-    if (INLINE_AMD3DNOW(cpu_flags)) {
-        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
-            c->try_8x8basis = try_8x8basis_3dnow;
-        }
-        c->add_8x8basis = add_8x8basis_3dnow;
-    }
 #endif /* HAVE_INLINE_ASM */
 
     if (EXTERNAL_MMX(cpu_flags)) {
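Two details worth noting. In the dsputilenc change above, try_8x8basis_3dnow is only installed when CODEC_FLAG_BITEXACT is unset, presumably because the 3dnow version does not produce bit-exact results. And the hpeldsp hunk below uses the EXTERNAL_* guards, which gate implementations assembled from standalone assembly files, rather than the INLINE_* guards that gate inline-assembly code; the override ordering applies the same way in both cases.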
--- a/libavcodec/x86/hpeldsp_init.c
+++ b/libavcodec/x86/hpeldsp_init.c
@@ -258,12 +258,12 @@ void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
     if (INLINE_MMX(cpu_flags))
         hpeldsp_init_mmx(c, flags, cpu_flags);
 
-    if (EXTERNAL_MMXEXT(cpu_flags))
-        hpeldsp_init_mmxext(c, flags, cpu_flags);
-
     if (EXTERNAL_AMD3DNOW(cpu_flags))
         hpeldsp_init_3dnow(c, flags, cpu_flags);
 
+    if (EXTERNAL_MMXEXT(cpu_flags))
+        hpeldsp_init_mmxext(c, flags, cpu_flags);
+
     if (EXTERNAL_SSE2(cpu_flags))
         hpeldsp_init_sse2(c, flags, cpu_flags);
 }
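Keeping each init function as a plain cascade in ascending order of preference (mmx, then 3dnow, then mmxext, then sse2) avoids per-pointer conditionals entirely: every block simply overwrites the pointers it can improve on, and after the cascade the context holds the best implementation the CPU supports.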