Also disable ff_vp8_idct_dc_add_mmx on x86_64, as the presence of SSE2 is guaranteed in such builds.

Signed-off-by: James Almer <jamrial@gmail.com>
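For reference, the scalar operation all of these versions implement is the DC-only IDCT add: round the single DC coefficient, clear it, and add it to every pixel of the 4x4 block with clipping to 0..255. A minimal C sketch of that behaviour (modelled on the C reference in vp8dsp.c, shown for illustration rather than copied verbatim):

    #include "libavutil/common.h"   /* av_clip_uint8() */

    static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16],
                                  ptrdiff_t stride)
    {
        int i, dc = (block[0] + 4) >> 3;   /* round and scale the DC coefficient */
        block[0] = 0;                      /* clear the coefficient after use */

        for (i = 0; i < 4; i++) {          /* add dc to each row of the 4x4 block */
            dst[0] = av_clip_uint8(dst[0] + dc);
            dst[1] = av_clip_uint8(dst[1] + dc);
            dst[2] = av_clip_uint8(dst[2] + dc);
            dst[3] = av_clip_uint8(dst[3] + dc);
            dst += stride;
        }
    }

The sse2 and sse4 asm versions below differ only in how the four 4-byte rows are stored back: pextrd requires SSE4.1, so the sse2 path shifts the packed result down with psrldq and stores each row with movd.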
@@ -906,6 +906,7 @@ cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
     %4 [dst2q+strideq+%3], m5
 %endmacro
 
+%if ARCH_X86_32
 INIT_MMX mmx
 cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
     ; load data
@@ -929,8 +930,9 @@ cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
     lea dst2q, [dst1q+strideq*2]
     ADD_DC m0, m1, 0, movh
     RET
+%endif
 
-INIT_XMM sse4
+%macro VP8_IDCT_DC_ADD 0
 cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
     ; load data
     movd m0, [blockq]
@@ -956,10 +958,25 @@ cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
     paddw m4, m0
     packuswb m2, m4
     movd [dst1q], m2
+%if cpuflag(sse4)
     pextrd [dst1q+strideq], m2, 1
     pextrd [dst2q], m2, 2
     pextrd [dst2q+strideq], m2, 3
+%else
+    psrldq m2, 4
+    movd [dst1q+strideq], m2
+    psrldq m2, 4
+    movd [dst2q], m2
+    psrldq m2, 4
+    movd [dst2q+strideq], m2
+%endif
     RET
+%endmacro
+
+INIT_XMM sse2
+VP8_IDCT_DC_ADD
+INIT_XMM sse4
+VP8_IDCT_DC_ADD
 
 ;-----------------------------------------------------------------------------
 ; void ff_vp8_idct_dc_add4y_<opt>(uint8_t *dst, int16_t block[4][16], int stride);
@@ -233,6 +233,8 @@ HVBILIN(ssse3, 8, 16, 16)
 
 void ff_vp8_idct_dc_add_mmx(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
+void ff_vp8_idct_dc_add_sse2(uint8_t *dst, int16_t block[16],
+                             ptrdiff_t stride);
 void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
                              ptrdiff_t stride);
 void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, int16_t block[4][16],
@@ -370,9 +372,9 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
     int cpu_flags = av_get_cpu_flags();
 
     if (EXTERNAL_MMX(cpu_flags)) {
-        c->vp8_idct_dc_add    = ff_vp8_idct_dc_add_mmx;
         c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
 #if ARCH_X86_32
+        c->vp8_idct_dc_add    = ff_vp8_idct_dc_add_mmx;
         c->vp8_idct_dc_add4y  = ff_vp8_idct_dc_add4y_mmx;
         c->vp8_idct_add       = ff_vp8_idct_add_mmx;
         c->vp8_luma_dc_wht    = ff_vp8_luma_dc_wht_mmx;
@@ -427,6 +429,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
     }
 
     if (EXTERNAL_SSE2(cpu_flags)) {
+        c->vp8_idct_dc_add          = ff_vp8_idct_dc_add_sse2;
         c->vp8_idct_dc_add4y        = ff_vp8_idct_dc_add4y_sse2;
         c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;