VP7 video decoder

Further performance improvements and security fixes by Vittorio Giovara,
Luca Barbato and Diego Biurrun.

Signed-off-by: Vittorio Giovara <vittorio.giovara@gmail.com>
Signed-off-by: Luca Barbato <lu_zero@gentoo.org>
Signed-off-by: Diego Biurrun <diego@biurrun.de>
@@ -13,6 +13,7 @@ version <next>:
 - OpenEXR image decoder
 - support encoding and decoding 4-channel SGI images
 - support decoding 16-bit RLE SGI images
+- VP7 video decoder
 version 10:
@@ -1847,6 +1847,7 @@ vp5_decoder_select="h264chroma hpeldsp videodsp vp3dsp"
 vp6_decoder_select="h264chroma hpeldsp huffman videodsp vp3dsp"
 vp6a_decoder_select="vp6_decoder"
 vp6f_decoder_select="vp6_decoder"
+vp7_decoder_select="h264pred videodsp"
 vp8_decoder_select="h264pred videodsp"
 vp9_decoder_select="videodsp"
 webp_decoder_select="vp8_decoder"
@@ -650,6 +650,8 @@ following image formats are supported:
 @tab fourcc: VP50
 @item On2 VP6 @tab @tab X
 @tab fourcc: VP60,VP61,VP62
+@item On2 VP7 @tab @tab X
+@tab fourcc: VP70,VP71
 @item VP8 @tab E @tab X
 @tab fourcc: VP80, encoding supported through external library libvpx
 @item VP9 @tab E @tab X
@@ -386,6 +386,7 @@ OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \
                               vp56rac.o
 OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
                               vp6dsp.o vp56rac.o
+OBJS-$(CONFIG_VP7_DECODER) += vp8.o vp8dsp.o vp56rac.o
 OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
 OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9data.o vp9dsp.o \
                               vp9block.o vp9prob.o vp9mvs.o vp56rac.o
@@ -261,6 +261,7 @@ void avcodec_register_all(void)
     REGISTER_DECODER(VP6, vp6);
     REGISTER_DECODER(VP6A, vp6a);
     REGISTER_DECODER(VP6F, vp6f);
+    REGISTER_DECODER(VP7, vp7);
     REGISTER_DECODER(VP8, vp8);
     REGISTER_DECODER(VP9, vp9);
     REGISTER_DECODER(VQA, vqa);
@@ -31,6 +31,7 @@ OBJS-$(CONFIG_MLP_DECODER) += arm/mlpdsp_init_arm.o
 OBJS-$(CONFIG_VC1_DECODER) += arm/vc1dsp_init_arm.o
 OBJS-$(CONFIG_VORBIS_DECODER) += arm/vorbisdsp_init_arm.o
 OBJS-$(CONFIG_VP6_DECODER) += arm/vp6dsp_init_arm.o
+OBJS-$(CONFIG_VP7_DECODER) += arm/vp8dsp_init_arm.o
 OBJS-$(CONFIG_VP8_DECODER) += arm/vp8dsp_init_arm.o
 OBJS-$(CONFIG_RV30_DECODER) += arm/rv34dsp_init_arm.o
 OBJS-$(CONFIG_RV40_DECODER) += arm/rv34dsp_init_arm.o \
@@ -55,6 +56,9 @@ ARMV6-OBJS-$(CONFIG_HPELDSP) += arm/hpeldsp_init_armv6.o \
 ARMV6-OBJS-$(CONFIG_MPEGAUDIODSP) += arm/mpegaudiodsp_fixed_armv6.o
 ARMV6-OBJS-$(CONFIG_MLP_DECODER) += arm/mlpdsp_armv6.o
+ARMV6-OBJS-$(CONFIG_VP7_DECODER) += arm/vp8_armv6.o \
+                                    arm/vp8dsp_init_armv6.o \
+                                    arm/vp8dsp_armv6.o
 ARMV6-OBJS-$(CONFIG_VP8_DECODER) += arm/vp8_armv6.o \
                                     arm/vp8dsp_init_armv6.o \
                                     arm/vp8dsp_armv6.o
@@ -102,5 +106,7 @@ NEON-OBJS-$(CONFIG_VC1_DECODER) += arm/vc1dsp_init_neon.o \
                                    arm/vc1dsp_neon.o
 NEON-OBJS-$(CONFIG_VORBIS_DECODER) += arm/vorbisdsp_neon.o
 NEON-OBJS-$(CONFIG_VP6_DECODER) += arm/vp6dsp_neon.o
+NEON-OBJS-$(CONFIG_VP7_DECODER) += arm/vp8dsp_init_neon.o \
+                                   arm/vp8dsp_neon.o
 NEON-OBJS-$(CONFIG_VP8_DECODER) += arm/vp8dsp_init_neon.o \
                                    arm/vp8dsp_neon.o
@@ -56,10 +56,11 @@ static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
     h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon;
     h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon;
-    if (codec_id != AV_CODEC_ID_VP8)
+    if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
         h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon;
     h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
-    if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP8) {
+    if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
+        codec_id != AV_CODEC_ID_VP8) {
         h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon;
         h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
         h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
@@ -75,7 +76,8 @@ static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
     h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
     h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
     h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
-    if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP8)
+    if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
+        codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
         h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon;
 }
@@ -21,6 +21,8 @@
 #include "libavcodec/vp8dsp.h"
+void ff_vp78dsp_init_armv6(VP8DSPContext *dsp);
+void ff_vp78dsp_init_neon(VP8DSPContext *dsp);
 void ff_vp8dsp_init_armv6(VP8DSPContext *dsp);
 void ff_vp8dsp_init_neon(VP8DSPContext *dsp);
@@ -23,6 +23,16 @@
 #include "libavcodec/vp8dsp.h"
 #include "vp8dsp.h"
+av_cold void ff_vp78dsp_init_arm(VP8DSPContext *dsp)
+{
+    int cpu_flags = av_get_cpu_flags();
+    if (have_armv6(cpu_flags))
+        ff_vp78dsp_init_armv6(dsp);
+    if (have_neon(cpu_flags))
+        ff_vp78dsp_init_neon(dsp);
+}
 av_cold void ff_vp8dsp_init_arm(VP8DSPContext *dsp)
 {
     int cpu_flags = av_get_cpu_flags();
@@ -40,29 +40,8 @@ VP8_BILIN(16, armv6);
 VP8_BILIN(8, armv6);
 VP8_BILIN(4, armv6);
-av_cold void ff_vp8dsp_init_armv6(VP8DSPContext *dsp)
+av_cold void ff_vp78dsp_init_armv6(VP8DSPContext *dsp)
 {
-    dsp->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_armv6;
-    dsp->vp8_luma_dc_wht_dc = ff_vp8_luma_dc_wht_dc_armv6;
-    dsp->vp8_idct_add = ff_vp8_idct_add_armv6;
-    dsp->vp8_idct_dc_add = ff_vp8_idct_dc_add_armv6;
-    dsp->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_armv6;
-    dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_armv6;
-    dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_armv6;
-    dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_armv6;
-    dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_armv6;
-    dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_armv6;
-    dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_armv6;
-    dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_armv6;
-    dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_armv6;
-    dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_armv6;
-    dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_armv6;
-    dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_armv6;
     dsp->put_vp8_epel_pixels_tab[0][0][0] = ff_put_vp8_pixels16_armv6;
     dsp->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_armv6;
     dsp->put_vp8_epel_pixels_tab[0][2][0] = ff_put_vp8_epel16_v6_armv6;
@@ -118,3 +97,27 @@ av_cold void ff_vp8dsp_init_armv6(VP8DSPContext *dsp)
     dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_armv6;
     dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_armv6;
 }
+av_cold void ff_vp8dsp_init_armv6(VP8DSPContext *dsp)
+{
+    dsp->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_armv6;
+    dsp->vp8_luma_dc_wht_dc = ff_vp8_luma_dc_wht_dc_armv6;
+    dsp->vp8_idct_add = ff_vp8_idct_add_armv6;
+    dsp->vp8_idct_dc_add = ff_vp8_idct_dc_add_armv6;
+    dsp->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_armv6;
+    dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_armv6;
+    dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_armv6;
+    dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_armv6;
+    dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_armv6;
+    dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_armv6;
+    dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_armv6;
+    dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_armv6;
+    dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_armv6;
+    dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_armv6;
+    dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_armv6;
+    dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_armv6;
+}
@@ -39,28 +39,8 @@ VP8_BILIN(16, neon);
 VP8_BILIN(8, neon);
 VP8_BILIN(4, neon);
-av_cold void ff_vp8dsp_init_neon(VP8DSPContext *dsp)
+av_cold void ff_vp78dsp_init_neon(VP8DSPContext *dsp)
 {
-    dsp->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_neon;
-    dsp->vp8_idct_add = ff_vp8_idct_add_neon;
-    dsp->vp8_idct_dc_add = ff_vp8_idct_dc_add_neon;
-    dsp->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_neon;
-    dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_neon;
-    dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_neon;
-    dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_neon;
-    dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_neon;
-    dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_neon;
-    dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_neon;
-    dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_neon;
-    dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_neon;
-    dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_neon;
-    dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_neon;
-    dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_neon;
     dsp->put_vp8_epel_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon;
     dsp->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_neon;
     dsp->put_vp8_epel_pixels_tab[0][2][0] = ff_put_vp8_epel16_v6_neon;
@@ -114,3 +94,26 @@ av_cold void ff_vp8dsp_init_neon(VP8DSPContext *dsp)
     dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_neon;
     dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_neon;
 }
+av_cold void ff_vp8dsp_init_neon(VP8DSPContext *dsp)
+{
+    dsp->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_neon;
+    dsp->vp8_idct_add = ff_vp8_idct_add_neon;
+    dsp->vp8_idct_dc_add = ff_vp8_idct_dc_add_neon;
+    dsp->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_neon;
+    dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_neon;
+    dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_neon;
+    dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_neon;
+    dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_neon;
+    dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_neon;
+    dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_neon;
+    dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_neon;
+    dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_neon;
+    dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_neon;
+    dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_neon;
+    dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_neon;
+}
@@ -288,6 +288,7 @@ enum AVCodecID {
     AV_CODEC_ID_BRENDER_PIX,
     AV_CODEC_ID_PAF_VIDEO,
     AV_CODEC_ID_EXR,
+    AV_CODEC_ID_VP7,
     /* various PCM "codecs" */
     AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@@ -1099,6 +1099,13 @@ static const AVCodecDescriptor codec_descriptors[] = {
         .long_name = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Video"),
         .props = AV_CODEC_PROP_LOSSY,
     },
+    {
+        .id = AV_CODEC_ID_VP7,
+        .type = AVMEDIA_TYPE_VIDEO,
+        .name = "vp7",
+        .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
+        .props = AV_CODEC_PROP_LOSSY,
+    },
     /* image codecs */
     {
@@ -410,7 +410,7 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
 #define H264_PRED(depth) \
     if(codec_id != AV_CODEC_ID_RV40){\
-        if(codec_id == AV_CODEC_ID_VP8) {\
+        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
             h->pred4x4[VERT_PRED ]= FUNCD(pred4x4_vertical_vp8);\
             h->pred4x4[HOR_PRED ]= FUNCD(pred4x4_horizontal_vp8);\
         } else {\
@@ -425,15 +425,14 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
             h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right , depth);\
             h->pred4x4[VERT_RIGHT_PRED ]= FUNCC(pred4x4_vertical_right , depth);\
             h->pred4x4[HOR_DOWN_PRED ]= FUNCC(pred4x4_horizontal_down , depth);\
-        if (codec_id == AV_CODEC_ID_VP8) {\
+        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
             h->pred4x4[VERT_LEFT_PRED ]= FUNCD(pred4x4_vertical_left_vp8);\
         } else\
             h->pred4x4[VERT_LEFT_PRED ]= FUNCC(pred4x4_vertical_left , depth);\
         h->pred4x4[HOR_UP_PRED ]= FUNCC(pred4x4_horizontal_up , depth);\
-        if(codec_id != AV_CODEC_ID_VP8) {\
+        if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
             h->pred4x4[LEFT_DC_PRED ]= FUNCC(pred4x4_left_dc , depth);\
             h->pred4x4[TOP_DC_PRED ]= FUNCC(pred4x4_top_dc , depth);\
-            h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
         } else {\
             h->pred4x4[TM_VP8_PRED ]= FUNCD(pred4x4_tm_vp8);\
             h->pred4x4[DC_127_PRED ]= FUNCC(pred4x4_127_dc , depth);\
@@ -441,6 +440,8 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
             h->pred4x4[VERT_VP8_PRED ]= FUNCC(pred4x4_vertical , depth);\
             h->pred4x4[HOR_VP8_PRED ]= FUNCC(pred4x4_horizontal , depth);\
         }\
+        if (codec_id != AV_CODEC_ID_VP8)\
+            h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
     }else{\
         h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical , depth);\
         h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal , depth);\
@@ -479,7 +480,7 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
         h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x16_vertical , depth);\
         h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x16_horizontal , depth);\
     }\
-    if (codec_id != AV_CODEC_ID_VP8) {\
+    if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
         if (chroma_format_idc <= 1) {\
             h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane , depth);\
         } else {\
@@ -487,7 +488,8 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
         }\
     } else\
         h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
-    if(codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP8){\
+    if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 && \
+        codec_id != AV_CODEC_ID_VP8) {\
         if (chroma_format_idc <= 1) {\
             h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
             h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
@@ -509,7 +511,7 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
         h->pred8x8[DC_PRED8x8 ]= FUNCD(pred8x8_dc_rv40);\
         h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
         h->pred8x8[TOP_DC_PRED8x8 ]= FUNCD(pred8x8_top_dc_rv40);\
-        if (codec_id == AV_CODEC_ID_VP8) {\
+        if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
             h->pred8x8[DC_127_PRED8x8]= FUNCC(pred8x8_127_dc , depth);\
             h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc , depth);\
         }\
@@ -530,6 +532,7 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
     case AV_CODEC_ID_RV40:\
         h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_rv40);\
         break;\
+    case AV_CODEC_ID_VP7:\
     case AV_CODEC_ID_VP8:\
         h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_tm_vp8);\
         h->pred16x16[DC_127_PRED8x8]= FUNCC(pred16x16_127_dc , depth);\
@@ -60,7 +60,7 @@
 #define VERT_VP8_PRED 10 ///< for VP8, #VERT_PRED is the average of
                          ///< (left col+cur col x2+right col) / 4;
                          ///< this is the "unaveraged" one
-#define HOR_VP8_PRED 11 ///< unaveraged version of #HOR_PRED, see
+#define HOR_VP8_PRED 14 ///< unaveraged version of #HOR_PRED, see
                         ///< #VERT_VP8_PRED for details
 #define DC_127_PRED 12
 #define DC_129_PRED 13
@@ -13,6 +13,7 @@ OBJS-$(CONFIG_VP3DSP) += ppc/vp3dsp_altivec.o
 OBJS-$(CONFIG_VC1_DECODER) += ppc/vc1dsp_altivec.o
 OBJS-$(CONFIG_VORBIS_DECODER) += ppc/vorbisdsp_altivec.o
+OBJS-$(CONFIG_VP7_DECODER) += ppc/vp8dsp_altivec.o
 OBJS-$(CONFIG_VP8_DECODER) += ppc/vp8dsp_altivec.o
 ALTIVEC-OBJS-$(CONFIG_DSPUTIL) += ppc/dsputil_altivec.o \
@@ -311,7 +311,8 @@ static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *s
 #endif /* HAVE_ALTIVEC */
-av_cold void ff_vp8dsp_init_ppc(VP8DSPContext *c)
+av_cold void ff_vp78dsp_init_ppc(VP8DSPContext *c)
 {
 #if HAVE_ALTIVEC
     if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
@@ -29,7 +29,7 @@
 #include "libavutil/version.h"
 #define LIBAVCODEC_VERSION_MAJOR 55
-#define LIBAVCODEC_VERSION_MINOR 42
+#define LIBAVCODEC_VERSION_MINOR 43
 #define LIBAVCODEC_VERSION_MICRO 0
 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -237,6 +237,7 @@ typedef struct VP8Context {
         uint8_t pred8x8c[3];
         uint8_t token[4][16][3][NUM_DCT_TOKENS - 1];
         uint8_t mvc[2][19];
+        uint8_t scan[16];
     } prob[2];
     VP8Macroblock *macroblocks_base;
@@ -270,6 +271,26 @@ typedef struct VP8Context {
      * 1 -> Macroblocks for entire frame alloced (sliced thread).
      */
     int mb_layout;
+    /**
+     * Fade bit present in bitstream (VP7)
+     */
+    int fade_present;
+    /**
+     * Interframe DC prediction (VP7)
+     * [0] VP56_FRAME_PREVIOUS
+     * [1] VP56_FRAME_GOLDEN
+     */
+    uint16_t inter_dc_pred[2][2];
+    /**
+     * Macroblock features (VP7)
+     */
+    uint8_t feature_enabled[4];
+    uint8_t feature_present_prob[4];
+    uint8_t feature_index_prob[4][3];
+    uint8_t feature_value[4][4];
 } VP8Context;
 int ff_vp8_decode_init(AVCodecContext *avctx);
@@ -30,6 +30,13 @@
 #include "vp8.h"
 #include "h264pred.h"
+static const uint8_t vp7_pred4x4_mode[] = {
+    [DC_PRED8x8] = DC_PRED,
+    [VERT_PRED8x8] = TM_VP8_PRED,
+    [HOR_PRED8x8] = TM_VP8_PRED,
+    [PLANE_PRED8x8] = TM_VP8_PRED,
+};
 static const uint8_t vp8_pred4x4_mode[] = {
     [DC_PRED8x8] = DC_PRED,
     [VERT_PRED8x8] = VERT_PRED,
@@ -51,6 +58,63 @@ static const int8_t vp8_pred16x16_tree_inter[4][2] = {
     { -PLANE_PRED8x8, -MODE_I4x4 }, // '110', '111'
 };
+typedef struct VP7MVPred {
+    int8_t yoffset;
+    int8_t xoffset;
+    uint8_t subblock;
+    uint8_t score;
+} VP7MVPred;
+#define VP7_MV_PRED_COUNT 12
+static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT] = {
+    { -1, 0, 12, 8 },
+    { 0, -1, 3, 8 },
+    { -1, -1, 15, 2 },
+    { -1, 1, 12, 2 },
+    { -2, 0, 12, 2 },
+    { 0, -2, 3, 2 },
+    { -1, -2, 15, 1 },
+    { -2, -1, 15, 1 },
+    { -2, 1, 12, 1 },
+    { -1, 2, 12, 1 },
+    { -2, -2, 15, 1 },
+    { -2, 2, 12, 1 },
+};
+static const int vp7_mode_contexts[31][4] = {
+    { 3, 3, 1, 246 },
+    { 7, 89, 66, 239 },
+    { 10, 90, 78, 238 },
+    { 14, 118, 95, 241 },
+    { 14, 123, 106, 238 },
+    { 20, 140, 109, 240 },
+    { 13, 155, 103, 238 },
+    { 21, 158, 99, 240 },
+    { 27, 82, 108, 232 },
+    { 19, 99, 123, 217 },
+    { 45, 139, 148, 236 },
+    { 50, 117, 144, 235 },
+    { 57, 128, 164, 238 },
+    { 69, 139, 171, 239 },
+    { 74, 154, 179, 238 },
+    { 112, 165, 186, 242 },
+    { 98, 143, 185, 245 },
+    { 105, 153, 190, 250 },
+    { 124, 167, 192, 245 },
+    { 131, 186, 203, 246 },
+    { 59, 184, 222, 224 },
+    { 148, 215, 214, 213 },
+    { 137, 211, 210, 219 },
+    { 190, 227, 128, 228 },
+    { 183, 228, 128, 228 },
+    { 194, 234, 128, 228 },
+    { 202, 236, 128, 228 },
+    { 205, 240, 128, 228 },
+    { 205, 244, 128, 228 },
+    { 225, 246, 128, 228 },
+    { 233, 251, 128, 228 },
+};
 static const int vp8_mode_contexts[6][4] = {
     { 7, 1, 1, 143 },
     { 14, 18, 14, 107 },
@@ -82,6 +146,10 @@ static const uint8_t vp8_mbsplit_prob[3] = {
     110, 111, 150
 };
+static const uint8_t vp7_submv_prob[3] = {
+    180, 162, 25
+};
 static const uint8_t vp8_submv_prob[5][3] = {
     { 147, 136, 18 },
     { 106, 145, 1 },
@@ -94,7 +162,7 @@ static const uint8_t vp8_pred16x16_prob_intra[4] = {
     145, 156, 163, 128
 };
 static const uint8_t vp8_pred16x16_prob_inter[4] = {
-    112, 86, 140, 37
+    112, 86, 140, 37
 };
 static const int8_t vp8_pred4x4_tree[9][2] = {
@@ -641,7 +709,7 @@ static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS - 1] = {
 };
 // fixme: copied from h264data.h
-static const uint8_t zigzag_scan[16] = {
+static const uint8_t zigzag_scan[16]={
     0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4,
     1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4,
     1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4,
@@ -674,11 +742,22 @@ static const uint8_t vp8_mv_update_prob[2][19] = {
     { 237,
       246,
       253, 253, 254, 254, 254, 254, 254,
-      254, 254, 254, 254, 254, 250, 250, 252, 254, 254 },
+      254, 254, 254, 254, 254, 250, 250, 252, /* VP8 only: */ 254, 254 },
     { 231,
       243,
       245, 253, 254, 254, 254, 254, 254,
-      254, 254, 254, 254, 254, 251, 251, 254, 254, 254 }
+      254, 254, 254, 254, 254, 251, 251, 254, /* VP8 only: */ 254, 254 }
+};
+static const uint8_t vp7_mv_default_prob[2][17] = {
+    { 162,
+      128,
+      225, 146, 172, 147, 214, 39, 156,
+      247, 210, 135, 68, 138, 220, 239, 246 },
+    { 164,
+      128,
+      204, 170, 119, 235, 140, 230, 228,
+      244, 184, 201, 44, 173, 221, 239, 253 }
 };
 static const uint8_t vp8_mv_default_prob[2][19] = {
@@ -692,4 +771,68 @@ static const uint8_t vp8_mv_default_prob[2][19] = {
       128, 130, 130, 74, 148, 180, 203, 236, 254, 254 }
 };
+static const uint8_t vp7_feature_value_size[2][4] = {
+    { 7, 6, 0, 8 },
+    { 7, 6, 0, 5 },
+};
+static const int8_t vp7_feature_index_tree[4][2] =
+{
+    { 1, 2 },
+    { -0, -1 }, // '00', '01'
+    { -2, -3 }, // '10', '11'
+};
+static const uint16_t vp7_ydc_qlookup[] = {
+    4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15,
+    16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, 29,
+    30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38, 39, 39, 40, 41,
+    41, 42, 43, 43, 44, 45, 45, 46, 47, 48, 48, 49, 50, 51, 52,
+    53, 53, 54, 56, 57, 58, 59, 60, 62, 63, 65, 66, 68, 70, 72,
+    74, 76, 79, 81, 84, 87, 90, 93, 96, 100, 104, 108, 112, 116, 121,
+    126, 131, 136, 142, 148, 154, 160, 167, 174, 182, 189, 198, 206, 215, 224,
+    234, 244, 254, 265, 277, 288, 301, 313, 327, 340, 355, 370, 385, 401, 417,
+    434, 452, 470, 489, 509, 529, 550, 572,
+};
+static const uint16_t vp7_yac_qlookup[] = {
+    4, 4, 5, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 15,
+    16, 17, 19, 20, 22, 23, 25, 26, 28, 29, 31, 32, 34, 35,
+    37, 38, 40, 41, 42, 44, 45, 46, 48, 49, 50, 51, 53, 54,
+    55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 67, 68, 69, 70,
+    72, 73, 75, 76, 78, 80, 82, 84, 86, 88, 91, 93, 96, 99,
+    102, 105, 109, 112, 116, 121, 125, 130, 135, 140, 146, 152, 158, 165,
+    172, 180, 188, 196, 205, 214, 224, 234, 245, 256, 268, 281, 294, 308,
+    322, 337, 353, 369, 386, 404, 423, 443, 463, 484, 506, 529, 553, 578,
+    604, 631, 659, 688, 718, 749, 781, 814, 849, 885, 922, 960, 1000, 1041,
+    1083, 1127,
+};
+static const uint16_t vp7_y2dc_qlookup[] = {
+    7, 9, 11, 13, 15, 17, 19, 21, 23, 26, 28, 30, 33, 35,
+    37, 39, 42, 44, 46, 48, 51, 53, 55, 57, 59, 61, 63, 65,
+    67, 69, 70, 72, 74, 75, 77, 78, 80, 81, 83, 84, 85, 87,
+    88, 89, 90, 92, 93, 94, 95, 96, 97, 99, 100, 101, 102, 104,
+    105, 106, 108, 109, 111, 113, 114, 116, 118, 120, 123, 125, 128, 131,
+    134, 137, 140, 144, 148, 152, 156, 161, 166, 171, 176, 182, 188, 195,
+    202, 209, 217, 225, 234, 243, 253, 263, 274, 285, 297, 309, 322, 336,
+    350, 365, 381, 397, 414, 432, 450, 470, 490, 511, 533, 556, 579, 604,
+    630, 656, 684, 713, 742, 773, 805, 838, 873, 908, 945, 983, 1022, 1063,
+    1105, 1148,
+};
+static const uint16_t vp7_y2ac_qlookup[] = {
+    7, 9, 11, 13, 16, 18, 21, 24, 26, 29, 32, 35,
+    38, 41, 43, 46, 49, 52, 55, 58, 61, 64, 66, 69,
+    72, 74, 77, 79, 82, 84, 86, 88, 91, 93, 95, 97,
+    98, 100, 102, 104, 105, 107, 109, 110, 112, 113, 115, 116,
+    117, 119, 120, 122, 123, 125, 127, 128, 130, 132, 134, 136,
+    138, 141, 143, 146, 149, 152, 155, 158, 162, 166, 171, 175,
+    180, 185, 191, 197, 204, 210, 218, 226, 234, 243, 252, 262,
+    273, 284, 295, 308, 321, 335, 350, 365, 381, 398, 416, 435,
+    455, 476, 497, 520, 544, 569, 595, 622, 650, 680, 711, 743,
+    776, 811, 848, 885, 925, 965, 1008, 1052, 1097, 1144, 1193, 1244,
+    1297, 1351, 1407, 1466, 1526, 1588, 1652, 1719,
+};
 #endif /* AVCODEC_VP8DATA_H */
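The four vp7_*_qlookup tables added above each hold 128 entries, one per value of the quantizer index VP7 signals in its frame header; they give the dequantization steps for luma DC (ydc), luma AC (yac) and the second-order luma transform (y2dc/y2ac). How the decoder consumes them is not part of this excerpt (the vp8.c changes are not shown), so the helper below is only an illustrative sketch; its name, signature and the clamping are assumptions, not code from the patch.

/* Illustrative sketch only -- not from this patch. Assumes the tables above
 * are in scope; av_clip() is the libavutil clamping helper. */
static void vp7_get_quants_example(int index, uint16_t *ydc, uint16_t *yac,
                                   uint16_t *y2dc, uint16_t *y2ac)
{
    index = av_clip(index, 0, 127);   /* index into the 128-entry tables */
    *ydc  = vp7_ydc_qlookup[index];   /* luma DC step */
    *yac  = vp7_yac_qlookup[index];   /* luma AC step */
    *y2dc = vp7_y2dc_qlookup[index];  /* second-order (Y2) DC step */
    *y2ac = vp7_y2ac_qlookup[index];  /* second-order (Y2) AC step */
}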
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2010 David Conrad
  * Copyright (C) 2010 Ronald S. Bultje
+ * Copyright (C) 2014 Peter Ross
  *
  * This file is part of Libav.
  *
@@ -29,7 +30,126 @@
 #include "mathops.h"
 #include "vp8dsp.h"
+#define MK_IDCT_DC_ADD4_C(name) \
+static void name ## _idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], \
+                                      ptrdiff_t stride) \
+{ \
+    name ## _idct_dc_add_c(dst + stride * 0 + 0, block[0], stride); \
+    name ## _idct_dc_add_c(dst + stride * 0 + 4, block[1], stride); \
+    name ## _idct_dc_add_c(dst + stride * 4 + 0, block[2], stride); \
+    name ## _idct_dc_add_c(dst + stride * 4 + 4, block[3], stride); \
+} \
+\
+static void name ## _idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], \
+                                     ptrdiff_t stride) \
+{ \
+    name ## _idct_dc_add_c(dst + 0, block[0], stride); \
+    name ## _idct_dc_add_c(dst + 4, block[1], stride); \
+    name ## _idct_dc_add_c(dst + 8, block[2], stride); \
+    name ## _idct_dc_add_c(dst + 12, block[3], stride); \
+}
+#if CONFIG_VP7_DECODER
+static void vp7_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
+{
+    int i, a1, b1, c1, d1;
+    int16_t tmp[16];
+    for (i = 0; i < 4; i++) {
+        a1 = (dc[i * 4 + 0] + dc[i * 4 + 2]) * 23170;
+        b1 = (dc[i * 4 + 0] - dc[i * 4 + 2]) * 23170;
+        c1 = dc[i * 4 + 1] * 12540 - dc[i * 4 + 3] * 30274;
+        d1 = dc[i * 4 + 1] * 30274 + dc[i * 4 + 3] * 12540;
+        tmp[i * 4 + 0] = (a1 + d1) >> 14;
+        tmp[i * 4 + 3] = (a1 - d1) >> 14;
+        tmp[i * 4 + 1] = (b1 + c1) >> 14;
+        tmp[i * 4 + 2] = (b1 - c1) >> 14;
+    }
+    for (i = 0; i < 4; i++) {
+        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
+        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
+        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
+        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
+        dc[i * 4 + 0] = 0;
+        dc[i * 4 + 1] = 0;
+        dc[i * 4 + 2] = 0;
+        dc[i * 4 + 3] = 0;
+        block[0][i][0] = (a1 + d1 + 0x20000) >> 18;
+        block[3][i][0] = (a1 - d1 + 0x20000) >> 18;
+        block[1][i][0] = (b1 + c1 + 0x20000) >> 18;
+        block[2][i][0] = (b1 - c1 + 0x20000) >> 18;
+    }
+}
+static void vp7_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
+{
+    int i, val = (23170 * (23170 * dc[0] >> 14) + 0x20000) >> 18;
+    dc[0] = 0;
+    for (i = 0; i < 4; i++) {
+        block[i][0][0] = val;
+        block[i][1][0] = val;
+        block[i][2][0] = val;
+        block[i][3][0] = val;
+    }
+}
+static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
+{
+    int i, a1, b1, c1, d1;
+    int16_t tmp[16];
+    for (i = 0; i < 4; i++) {
+        a1 = (block[i * 4 + 0] + block[i * 4 + 2]) * 23170;
+        b1 = (block[i * 4 + 0] - block[i * 4 + 2]) * 23170;
+        c1 = block[i * 4 + 1] * 12540 - block[i * 4 + 3] * 30274;
+        d1 = block[i * 4 + 1] * 30274 + block[i * 4 + 3] * 12540;
+        block[i * 4 + 0] = 0;
+        block[i * 4 + 1] = 0;
+        block[i * 4 + 2] = 0;
+        block[i * 4 + 3] = 0;
+        tmp[i * 4 + 0] = (a1 + d1) >> 14;
+        tmp[i * 4 + 3] = (a1 - d1) >> 14;
+        tmp[i * 4 + 1] = (b1 + c1) >> 14;
+        tmp[i * 4 + 2] = (b1 - c1) >> 14;
+    }
+    for (i = 0; i < 4; i++) {
+        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
+        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
+        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
+        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
+        dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] +
+                                            ((a1 + d1 + 0x20000) >> 18));
+        dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] +
+                                            ((a1 - d1 + 0x20000) >> 18));
+        dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] +
+                                            ((b1 + c1 + 0x20000) >> 18));
+        dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] +
+                                            ((b1 - c1 + 0x20000) >> 18));
+    }
+}
+static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
+{
+    int i, dc = (23170 * (23170 * block[0] >> 14) + 0x20000) >> 18;
+    block[0] = 0;
+    for (i = 0; i < 4; i++) {
+        dst[0] = av_clip_uint8(dst[0] + dc);
+        dst[1] = av_clip_uint8(dst[1] + dc);
+        dst[2] = av_clip_uint8(dst[2] + dc);
+        dst[3] = av_clip_uint8(dst[3] + dc);
+        dst += stride;
+    }
+}
+MK_IDCT_DC_ADD4_C(vp7)
+#endif /* CONFIG_VP7_DECODER */
 // TODO: Maybe add dequant
+#if CONFIG_VP8_DECODER
 static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
 {
     int i, t0, t1, t2, t3;
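Reading note on the constants in the VP7 transforms added above: 23170, 30274 and 12540 are, as far as these values suggest, the usual 2^15-scaled roundings of 1/sqrt(2), cos(pi/8) and sin(pi/8); that reading is an editorial interpretation, not something the patch states. A stand-alone check of the numbers:

/* Editorial illustration only, not code from the patch. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    /* 23170 ~= (1/sqrt(2)) * 2^15, 30274 ~= cos(pi/8) * 2^15,
     * 12540 ~= sin(pi/8) * 2^15 */
    printf("%.1f %.1f %.1f\n",
           32768.0 / sqrt(2.0),         /* ~23170.5 */
           32768.0 * cos(M_PI / 8.0),   /* ~30273.7 */
           32768.0 * sin(M_PI / 8.0));  /* ~12539.8 */
    return 0;
}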
@@ -128,23 +248,8 @@ static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
     }
 }
-static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16],
-                                 ptrdiff_t stride)
-{
-    vp8_idct_dc_add_c(dst + stride * 0 + 0, block[0], stride);
-    vp8_idct_dc_add_c(dst + stride * 0 + 4, block[1], stride);
-    vp8_idct_dc_add_c(dst + stride * 4 + 0, block[2], stride);
-    vp8_idct_dc_add_c(dst + stride * 4 + 4, block[3], stride);
-}
-static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16],
-                                ptrdiff_t stride)
-{
-    vp8_idct_dc_add_c(dst + 0, block[0], stride);
-    vp8_idct_dc_add_c(dst + 4, block[1], stride);
-    vp8_idct_dc_add_c(dst + 8, block[2], stride);
-    vp8_idct_dc_add_c(dst + 12, block[3], stride);
-}
+MK_IDCT_DC_ADD4_C(vp8)
+#endif /* CONFIG_VP8_DECODER */
 // because I like only having two parameters to pass functions...
 #define LOAD_PIXELS \
@@ -160,7 +265,7 @@ static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16],
 #define clip_int8(n) (cm[n + 0x80] - 0x80)
 static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
-                                           int is4tap)
+                                           int is4tap, int is_vp7)
 {
     LOAD_PIXELS
     int a, f1, f2;
@@ -176,7 +281,11 @@ static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
     // We deviate from the spec here with c(a+3) >> 3
     // since that's what libvpx does.
     f1 = FFMIN(a + 4, 127) >> 3;
-    f2 = FFMIN(a + 3, 127) >> 3;
+    if (is_vp7)
+        f2 = f1 - ((a & 7) == 4);
+    else
+        f2 = FFMIN(a + 3, 127) >> 3;
     // Despite what the spec says, we do need to clamp here to
     // be bitexact with libvpx.
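Note on the VP7 branch above: since a is restricted to [-128, 127] by clip_int8, f1 - ((a & 7) == 4) gives the same value as the VP8 expression FFMIN(a + 3, 127) >> 3 for every a except a == 124, where the VP8 branch yields 15 and the VP7 branch 14 (the difference comes from f2 being derived from the already-clamped f1). A brute-force check of that claim, as an editorial illustration and not code from the patch:

/* Editorial illustration only. */
#include <stdio.h>
#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int a;
    for (a = -128; a <= 127; a++) {
        int f1     = FFMIN(a + 4, 127) >> 3;
        int f2_vp8 = FFMIN(a + 3, 127) >> 3;
        int f2_vp7 = f1 - ((a & 7) == 4);
        if (f2_vp7 != f2_vp8)
            printf("a=%d: vp8=%d vp7=%d\n", a, f2_vp8, f2_vp7);
    }
    return 0; /* prints only: a=124: vp8=15 vp7=14 */
}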
@@ -185,13 +294,33 @@ static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
     // only used for _inner on blocks without high edge variance
     if (!is4tap) {
-        a = (f1 + 1) >> 1;
+        a = (f1 + 1) >> 1;
         p[-2 * stride] = cm[p1 + a];
         p[ 1 * stride] = cm[q1 - a];
     }
 }
-static av_always_inline int simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
+static av_always_inline void vp7_filter_common(uint8_t *p, ptrdiff_t stride,
+                                               int is4tap)
+{
+    filter_common(p, stride, is4tap, IS_VP7);
+}
+static av_always_inline void vp8_filter_common(uint8_t *p, ptrdiff_t stride,
+                                               int is4tap)
+{
+    filter_common(p, stride, is4tap, IS_VP8);
+}
+static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride,
+                                             int flim)
+{
+    LOAD_PIXELS
+    return FFABS(p0 - q0) <= flim;
+}
+static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride,
+                                             int flim)
 {
     LOAD_PIXELS
     return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
@@ -201,19 +330,21 @@ static av_always_inline int simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
  * E - limit at the macroblock edge
  * I - limit for interior difference
  */
-static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride,
-                                         int E, int I)
-{
-    LOAD_PIXELS
-    return simple_limit(p, stride, E) &&
-           FFABS(p3 - p2) <= I &&
-           FFABS(p2 - p1) <= I &&
-           FFABS(p1 - p0) <= I &&
-           FFABS(q3 - q2) <= I &&
-           FFABS(q2 - q1) <= I &&
-           FFABS(q1 - q0) <= I;
+#define NORMAL_LIMIT(vpn) \
+static av_always_inline int vp ## vpn ## _normal_limit(uint8_t *p, \
+                                                       ptrdiff_t stride, \
+                                                       int E, int I) \
+{ \
+    LOAD_PIXELS \
+    return vp ## vpn ## _simple_limit(p, stride, E) && \
+           FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I && \
+           FFABS(p1 - p0) <= I && FFABS(q3 - q2) <= I && \
+           FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I; \
 }
+NORMAL_LIMIT(7)
+NORMAL_LIMIT(8)
 // high edge variance
 static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
 {
@@ -243,82 +374,91 @@ static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
     p[ 2 * stride] = cm[q2 - a2];
 }
-#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
+#define LOOP_FILTER(vpn, dir, size, stridea, strideb, maybe_inline) \
 static maybe_inline \
-void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
-                                               ptrdiff_t stride, \
-                                               int flim_E, int flim_I, \
-                                               int hev_thresh) \
+void vpn ## _ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
+                                                   ptrdiff_t stride, \
+                                                   int flim_E, int flim_I, \
+                                                   int hev_thresh) \
 { \
     int i; \
     for (i = 0; i < size; i++) \
-        if (normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
+        if (vpn ## _normal_limit(dst + i * stridea, strideb, \
+                                 flim_E, flim_I)) { \
             if (hev(dst + i * stridea, strideb, hev_thresh)) \
-                filter_common(dst + i * stridea, strideb, 1); \
+                vpn ## _filter_common(dst + i * stridea, strideb, 1); \
             else \
                 filter_mbedge(dst + i * stridea, strideb); \
         } \
 } \
 \
 static maybe_inline \
-void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
-                                                     ptrdiff_t stride, \
-                                                     int flim_E, int flim_I, \
-                                                     int hev_thresh) \
+void vpn ## _ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
+                                                         ptrdiff_t stride, \
+                                                         int flim_E, \
+                                                         int flim_I, \
+                                                         int hev_thresh) \
 { \
     int i; \
     for (i = 0; i < size; i++) \
-        if (normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
+        if (vpn ## _normal_limit(dst + i * stridea, strideb, \
+                                 flim_E, flim_I)) { \
            int hv = hev(dst + i * stridea, strideb, hev_thresh); \
            if (hv) \
-               filter_common(dst + i * stridea, strideb, 1); \
+               vpn ## _filter_common(dst + i * stridea, strideb, 1); \
            else \
-               filter_common(dst + i * stridea, strideb, 0); \
+               vpn ## _filter_common(dst + i * stridea, strideb, 0); \
         } \
 }
-LOOP_FILTER(v, 16, 1, stride, )
-LOOP_FILTER(h, 16, stride, 1, )
-#define UV_LOOP_FILTER(dir, stridea, strideb) \
-LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
-static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, \
-                                             ptrdiff_t stride, int fE, \
-                                             int fI, int hev_thresh) \
+#define UV_LOOP_FILTER(vpn, dir, stridea, strideb) \
+LOOP_FILTER(vpn, dir, 8, stridea, strideb, av_always_inline) \
+static void vpn ## _ ## dir ## _loop_filter8uv_c(uint8_t *dstU, \
+                                                 uint8_t *dstV, \
+                                                 ptrdiff_t stride, int fE, \
+                                                 int fI, int hev_thresh) \
 { \
-    vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
-    vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
+    vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
+    vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
 } \
 \
-static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, \
-                                                   uint8_t *dstV, \
-                                                   ptrdiff_t stride, int fE, \
-                                                   int fI, int hev_thresh) \
+static void vpn ## _ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, \
+                                                       uint8_t *dstV, \
+                                                       ptrdiff_t stride, \
+                                                       int fE, int fI, \
+                                                       int hev_thresh) \
 { \
-    vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh); \
-    vp8_ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh); \
+    vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, \
+                                             hev_thresh); \
+    vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, \
+                                             hev_thresh); \
 }
-UV_LOOP_FILTER(v, 1, stride)
-UV_LOOP_FILTER(h, stride, 1)
-static void vp8_v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
-{
-    int i;
-    for (i = 0; i < 16; i++)
-        if (simple_limit(dst + i, stride, flim))
-            filter_common(dst + i, stride, 1);
+#define LOOP_FILTER_SIMPLE(vpn) \
+static void vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
+                                           int flim) \
+{ \
+    int i; \
+    for (i = 0; i < 16; i++) \
+        if (vpn ## _simple_limit(dst + i, stride, flim)) \
+            vpn ## _filter_common(dst + i, stride, 1); \
+} \
+\
+static void vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
+                                           int flim) \
+{ \
+    int i; \
+    for (i = 0; i < 16; i++) \
+        if (vpn ## _simple_limit(dst + i * stride, 1, flim)) \
+            vpn ## _filter_common(dst + i * stride, 1, 1); \
 }
-static void vp8_h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
-{
-    int i;
-    for (i = 0; i < 16; i++)
-        if (simple_limit(dst + i * stride, 1, flim))
-            filter_common(dst + i * stride, 1, 1);
-}
+#define LOOP_FILTERS(vpn) \
+    LOOP_FILTER(vpn, v, 16, 1, stride, ) \
+    LOOP_FILTER(vpn, h, 16, stride, 1, ) \
+    UV_LOOP_FILTER(vpn, v, 1, stride) \
+    UV_LOOP_FILTER(vpn, h, stride, 1) \
+    LOOP_FILTER_SIMPLE(vpn) \
 static const uint8_t subpel_filters[7][6] = {
     { 0, 6, 123, 12, 1, 0 },
@@ -507,7 +647,7 @@ VP8_BILINEAR(16)
 VP8_BILINEAR(8)
 VP8_BILINEAR(4)
-#define VP8_MC_FUNC(IDX, SIZE) \
+#define VP78_MC_FUNC(IDX, SIZE) \
     dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
     dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
     dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
@@ -518,8 +658,8 @@ VP8_BILINEAR(4)
     dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
     dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c
-#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
-    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
+#define VP78_BILINEAR_MC_FUNC(IDX, SIZE) \
+    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
     dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
     dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
     dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
@@ -529,6 +669,54 @@ VP8_BILINEAR(4)
     dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
     dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c
+av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
+{
+    VP78_MC_FUNC(0, 16);
+    VP78_MC_FUNC(1, 8);
+    VP78_MC_FUNC(2, 4);
+    VP78_BILINEAR_MC_FUNC(0, 16);
+    VP78_BILINEAR_MC_FUNC(1, 8);
+    VP78_BILINEAR_MC_FUNC(2, 4);
+    if (ARCH_ARM)
+        ff_vp78dsp_init_arm(dsp);
+    if (ARCH_PPC)
+        ff_vp78dsp_init_ppc(dsp);
+    if (ARCH_X86)
+        ff_vp78dsp_init_x86(dsp);
+}
+#if CONFIG_VP7_DECODER
+LOOP_FILTERS(vp7)
+av_cold void ff_vp7dsp_init(VP8DSPContext *dsp)
+{
+    dsp->vp8_luma_dc_wht = vp7_luma_dc_wht_c;
+    dsp->vp8_luma_dc_wht_dc = vp7_luma_dc_wht_dc_c;
+    dsp->vp8_idct_add = vp7_idct_add_c;
+    dsp->vp8_idct_dc_add = vp7_idct_dc_add_c;
+    dsp->vp8_idct_dc_add4y = vp7_idct_dc_add4y_c;
+    dsp->vp8_idct_dc_add4uv = vp7_idct_dc_add4uv_c;
+    dsp->vp8_v_loop_filter16y = vp7_v_loop_filter16_c;
+    dsp->vp8_h_loop_filter16y = vp7_h_loop_filter16_c;
+    dsp->vp8_v_loop_filter8uv = vp7_v_loop_filter8uv_c;
+    dsp->vp8_h_loop_filter8uv = vp7_h_loop_filter8uv_c;
+    dsp->vp8_v_loop_filter16y_inner = vp7_v_loop_filter16_inner_c;
+    dsp->vp8_h_loop_filter16y_inner = vp7_h_loop_filter16_inner_c;
+    dsp->vp8_v_loop_filter8uv_inner = vp7_v_loop_filter8uv_inner_c;
+    dsp->vp8_h_loop_filter8uv_inner = vp7_h_loop_filter8uv_inner_c;
+    dsp->vp8_v_loop_filter_simple = vp7_v_loop_filter_simple_c;
+    dsp->vp8_h_loop_filter_simple = vp7_h_loop_filter_simple_c;
+}
+#endif /* CONFIG_VP7_DECODER */
+#if CONFIG_VP8_DECODER
+LOOP_FILTERS(vp8)
 av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
 {
     dsp->vp8_luma_dc_wht = vp8_luma_dc_wht_c;
| @@ -551,18 +739,9 @@ av_cold void ff_vp8dsp_init(VP8DSPContext *dsp) | |||||
| dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c; | dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c; | ||||
| dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c; | dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c; | ||||
| VP8_MC_FUNC(0, 16); | |||||
| VP8_MC_FUNC(1, 8); | |||||
| VP8_MC_FUNC(2, 4); | |||||
| VP8_BILINEAR_MC_FUNC(0, 16); | |||||
| VP8_BILINEAR_MC_FUNC(1, 8); | |||||
| VP8_BILINEAR_MC_FUNC(2, 4); | |||||
| if (ARCH_ARM) | if (ARCH_ARM) | ||||
| ff_vp8dsp_init_arm(dsp); | ff_vp8dsp_init_arm(dsp); | ||||
| if (ARCH_PPC) | |||||
| ff_vp8dsp_init_ppc(dsp); | |||||
| if (ARCH_X86) | if (ARCH_X86) | ||||
| ff_vp8dsp_init_x86(dsp); | ff_vp8dsp_init_x86(dsp); | ||||
| } | } | ||||
| #endif /* CONFIG_VP8_DECODER */ | |||||
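With the vp8dsp.c changes complete, the intended call pattern is: fill the shared MC/bilinear tables once via ff_vp78dsp_init(), then overlay the codec-specific IDCT and loop-filter entries. A minimal sketch of how a combined decoder might wire this up (the is_vp7 flag and the wrapper function are assumptions for the example, not code from this patch):

    #include "config.h"
    #include "vp8dsp.h"

    /* Sketch only: how the split initializers are meant to be combined. */
    static void init_vp78_dsp_sketch(VP8DSPContext *dsp, int is_vp7)
    {
        ff_vp78dsp_init(dsp);          /* MC + bilinear tables shared by VP7/VP8 */
    #if CONFIG_VP7_DECODER
        if (is_vp7)
            ff_vp7dsp_init(dsp);       /* VP7 IDCT and loop filters (C versions added above) */
    #endif
    #if CONFIG_VP8_DECODER
        if (!is_vp7)
            ff_vp8dsp_init(dsp);       /* VP8 IDCT, loop filters and their SIMD hooks */
    #endif
    }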
| @@ -88,9 +88,18 @@ void ff_put_vp8_pixels8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride, | |||||
| void ff_put_vp8_pixels4_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride, | void ff_put_vp8_pixels4_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride, | ||||
| int h, int x, int y); | int h, int x, int y); | ||||
| void ff_vp7dsp_init(VP8DSPContext *c); | |||||
| void ff_vp78dsp_init(VP8DSPContext *c); | |||||
| void ff_vp78dsp_init_arm(VP8DSPContext *c); | |||||
| void ff_vp78dsp_init_ppc(VP8DSPContext *c); | |||||
| void ff_vp78dsp_init_x86(VP8DSPContext *c); | |||||
| void ff_vp8dsp_init(VP8DSPContext *c); | void ff_vp8dsp_init(VP8DSPContext *c); | ||||
| void ff_vp8dsp_init_x86(VP8DSPContext *c); | |||||
| void ff_vp8dsp_init_arm(VP8DSPContext *c); | void ff_vp8dsp_init_arm(VP8DSPContext *c); | ||||
| void ff_vp8dsp_init_ppc(VP8DSPContext *c); | |||||
| void ff_vp8dsp_init_x86(VP8DSPContext *c); | |||||
| #define IS_VP7 1 | |||||
| #define IS_VP8 0 | |||||
| #endif /* AVCODEC_VP8DSP_H */ | #endif /* AVCODEC_VP8DSP_H */ | ||||
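The new IS_VP7/IS_VP8 constants in vp8dsp.h suggest compile-time specialization of shared decoder helpers: passing the constant into an always-inline worker lets the compiler strip the other codec's branches. A sketch of that pattern under this assumption (the worker name and body are illustrative, not taken from vp8.c):

    #include "libavutil/attributes.h"
    #include "vp8dsp.h"   /* IS_VP7 / IS_VP8 */

    /* Illustrative worker: with a constant is_vp7 argument, each caller
     * gets a specialized copy with the dead branch removed. */
    static av_always_inline int parse_token_sketch(int is_vp7)
    {
        if (is_vp7)
            return 7;   /* VP7-specific handling would go here */
        else
            return 8;   /* VP8-specific handling would go here */
    }

    static int parse_token_vp7(void) { return parse_token_sketch(IS_VP7); }
    static int parse_token_vp8(void) { return parse_token_sketch(IS_VP8); }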
| @@ -37,6 +37,7 @@ OBJS-$(CONFIG_TRUEHD_DECODER) += x86/mlpdsp.o | |||||
| OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_init.o | OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_init.o | ||||
| OBJS-$(CONFIG_VORBIS_DECODER) += x86/vorbisdsp_init.o | OBJS-$(CONFIG_VORBIS_DECODER) += x86/vorbisdsp_init.o | ||||
| OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp_init.o | OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp_init.o | ||||
| OBJS-$(CONFIG_VP7_DECODER) += x86/vp8dsp_init.o | |||||
| OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp_init.o | OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp_init.o | ||||
| OBJS-$(CONFIG_VP9_DECODER) += x86/vp9dsp_init.o | OBJS-$(CONFIG_VP9_DECODER) += x86/vp9dsp_init.o | ||||
| @@ -92,6 +93,8 @@ YASM-OBJS-$(CONFIG_RV40_DECODER) += x86/rv34dsp.o \ | |||||
| YASM-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp.o | YASM-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp.o | ||||
| YASM-OBJS-$(CONFIG_VORBIS_DECODER) += x86/vorbisdsp.o | YASM-OBJS-$(CONFIG_VORBIS_DECODER) += x86/vorbisdsp.o | ||||
| YASM-OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp.o | YASM-OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp.o | ||||
| YASM-OBJS-$(CONFIG_VP7_DECODER) += x86/vp8dsp.o \ | |||||
| x86/vp8dsp_loopfilter.o | |||||
| YASM-OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp.o \ | YASM-OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp.o \ | ||||
| x86/vp8dsp_loopfilter.o | x86/vp8dsp_loopfilter.o | ||||
| YASM-OBJS-$(CONFIG_VP9_DECODER) += x86/vp9dsp.o | YASM-OBJS-$(CONFIG_VP9_DECODER) += x86/vp9dsp.o | ||||
| @@ -195,7 +195,7 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, | |||||
| h->pred8x8 [VERT_PRED8x8 ] = ff_pred8x8_vertical_8_mmx; | h->pred8x8 [VERT_PRED8x8 ] = ff_pred8x8_vertical_8_mmx; | ||||
| h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmx; | h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmx; | ||||
| } | } | ||||
| if (codec_id == AV_CODEC_ID_VP8) { | |||||
| if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) { | |||||
| h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmx; | h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmx; | ||||
| h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmx; | h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmx; | ||||
| h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmx; | h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmx; | ||||
| @@ -231,7 +231,8 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, | |||||
| h->pred4x4 [VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_8_mmxext; | h->pred4x4 [VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_8_mmxext; | ||||
| h->pred4x4 [HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_8_mmxext; | h->pred4x4 [HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_8_mmxext; | ||||
| h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_8_mmxext; | h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_8_mmxext; | ||||
| if (codec_id == AV_CODEC_ID_VP8 || codec_id == AV_CODEC_ID_H264) { | |||||
| if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8 || | |||||
| codec_id == AV_CODEC_ID_H264) { | |||||
| h->pred4x4 [DIAG_DOWN_LEFT_PRED] = ff_pred4x4_down_left_8_mmxext; | h->pred4x4 [DIAG_DOWN_LEFT_PRED] = ff_pred4x4_down_left_8_mmxext; | ||||
| } | } | ||||
| if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) { | if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) { | ||||
| @@ -246,7 +247,7 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, | |||||
| h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_8_mmxext; | h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_8_mmxext; | ||||
| } | } | ||||
| } | } | ||||
| if (codec_id == AV_CODEC_ID_VP8) { | |||||
| if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) { | |||||
| h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmxext; | h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmxext; | ||||
| h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_8_mmxext; | h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_8_mmxext; | ||||
| h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmxext; | h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmxext; | ||||
| @@ -276,7 +277,7 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, | |||||
| h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_sse2; | h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_sse2; | ||||
| h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_sse2; | h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_sse2; | ||||
| h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_sse2; | h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_sse2; | ||||
| if (codec_id == AV_CODEC_ID_VP8) { | |||||
| if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) { | |||||
| h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_sse2; | h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_sse2; | ||||
| h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_sse2; | h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_sse2; | ||||
| } else { | } else { | ||||
| @@ -307,7 +308,7 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, | |||||
| h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_ssse3; | h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_ssse3; | ||||
| h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_8_ssse3; | h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_8_ssse3; | ||||
| h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_ssse3; | h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_ssse3; | ||||
| if (codec_id == AV_CODEC_ID_VP8) { | |||||
| if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) { | |||||
| h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_ssse3; | h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_ssse3; | ||||
| h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_ssse3; | h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_ssse3; | ||||
| } else { | } else { | ||||
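The h264pred changes simply widen the existing VP8 checks so that AV_CODEC_ID_VP7 selects the same TM_VP8/DC_RV40 x86 predictors, since VP7 shares VP8's intra prediction modes. As a hedged illustration, the 8-bit, 4:2:0 arguments below are assumed to mirror what the VP8 decoder passes:

    #include "libavcodec/avcodec.h"
    #include "libavcodec/h264pred.h"

    /* Sketch: requesting the prediction tables for VP7 now behaves
     * exactly like the VP8 case thanks to the patched codec_id checks. */
    static void init_pred_sketch(H264PredContext *hpc)
    {
        ff_h264_pred_init(hpc, AV_CODEC_ID_VP7, 8, 1);
    }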
| @@ -315,25 +315,69 @@ DECLARE_LOOP_FILTER(sse4) | |||||
| c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT | c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT | ||||
| av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) | |||||
| av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c) | |||||
| { | { | ||||
| #if HAVE_YASM | #if HAVE_YASM | ||||
| int cpu_flags = av_get_cpu_flags(); | int cpu_flags = av_get_cpu_flags(); | ||||
| if (EXTERNAL_MMX(cpu_flags)) { | if (EXTERNAL_MMX(cpu_flags)) { | ||||
| c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; | |||||
| c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx; | |||||
| #if ARCH_X86_32 | #if ARCH_X86_32 | ||||
| c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx; | |||||
| c->vp8_idct_add = ff_vp8_idct_add_mmx; | |||||
| c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; | |||||
| c->put_vp8_epel_pixels_tab[0][0][0] = | c->put_vp8_epel_pixels_tab[0][0][0] = | ||||
| c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; | c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; | ||||
| #endif | #endif | ||||
| c->put_vp8_epel_pixels_tab[1][0][0] = | c->put_vp8_epel_pixels_tab[1][0][0] = | ||||
| c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx; | c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx; | ||||
| } | |||||
| /* note that 4-tap width=16 functions are missing because w=16 | |||||
| * is only used for luma, and luma is always a copy or sixtap. */ | |||||
| if (EXTERNAL_MMXEXT(cpu_flags)) { | |||||
| VP8_MC_FUNC(2, 4, mmxext); | |||||
| VP8_BILINEAR_MC_FUNC(2, 4, mmxext); | |||||
| #if ARCH_X86_32 | |||||
| VP8_LUMA_MC_FUNC(0, 16, mmxext); | |||||
| VP8_MC_FUNC(1, 8, mmxext); | |||||
| VP8_BILINEAR_MC_FUNC(0, 16, mmxext); | |||||
| VP8_BILINEAR_MC_FUNC(1, 8, mmxext); | |||||
| #endif | |||||
| } | |||||
| if (EXTERNAL_SSE(cpu_flags)) { | |||||
| c->put_vp8_epel_pixels_tab[0][0][0] = | |||||
| c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse; | |||||
| } | |||||
| if (EXTERNAL_SSE2(cpu_flags) && (cpu_flags & AV_CPU_FLAG_SSE2SLOW)) { | |||||
| VP8_LUMA_MC_FUNC(0, 16, sse2); | |||||
| VP8_MC_FUNC(1, 8, sse2); | |||||
| VP8_BILINEAR_MC_FUNC(0, 16, sse2); | |||||
| VP8_BILINEAR_MC_FUNC(1, 8, sse2); | |||||
| } | |||||
| if (EXTERNAL_SSSE3(cpu_flags)) { | |||||
| VP8_LUMA_MC_FUNC(0, 16, ssse3); | |||||
| VP8_MC_FUNC(1, 8, ssse3); | |||||
| VP8_MC_FUNC(2, 4, ssse3); | |||||
| VP8_BILINEAR_MC_FUNC(0, 16, ssse3); | |||||
| VP8_BILINEAR_MC_FUNC(1, 8, ssse3); | |||||
| VP8_BILINEAR_MC_FUNC(2, 4, ssse3); | |||||
| } | |||||
| #endif /* HAVE_YASM */ | |||||
| } | |||||
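ff_vp78dsp_init_x86() keeps the usual x86 init pattern: query av_get_cpu_flags() once, then guard each group of assignments with EXTERNAL_*(cpu_flags), which combines the configure-time HAVE_* check with the runtime flag. A sketch of how a further routine would be hooked in, using a deliberately hypothetical function name and the single-stride signature shown in vp8dsp.h above:

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/cpu.h"
    #include "libavutil/x86/cpu.h"
    #include "libavcodec/vp8dsp.h"

    /* Hypothetical SIMD routine -- not part of this patch. */
    void ff_hypothetical_put_vp8_pixels16_sse4(uint8_t *dst, uint8_t *src,
                                               ptrdiff_t stride, int h, int x, int y);

    static void extend_x86_init_sketch(VP8DSPContext *c)
    {
        int cpu_flags = av_get_cpu_flags();
        if (EXTERNAL_SSE4(cpu_flags))
            c->put_vp8_epel_pixels_tab[0][0][0] =
                ff_hypothetical_put_vp8_pixels16_sse4;
    }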
| av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c) | |||||
| { | |||||
| #if HAVE_YASM | |||||
| int cpu_flags = av_get_cpu_flags(); | |||||
| if (EXTERNAL_MMX(cpu_flags)) { | |||||
| c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; | |||||
| c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx; | |||||
| #if ARCH_X86_32 | #if ARCH_X86_32 | ||||
| c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx; | |||||
| c->vp8_idct_add = ff_vp8_idct_add_mmx; | |||||
| c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; | |||||
| c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx; | c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx; | ||||
| c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx; | c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx; | ||||
| @@ -352,14 +396,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) | |||||
| /* note that 4-tap width=16 functions are missing because w=16 | /* note that 4-tap width=16 functions are missing because w=16 | ||||
| * is only used for luma, and luma is always a copy or sixtap. */ | * is only used for luma, and luma is always a copy or sixtap. */ | ||||
| if (EXTERNAL_MMXEXT(cpu_flags)) { | if (EXTERNAL_MMXEXT(cpu_flags)) { | ||||
| VP8_MC_FUNC(2, 4, mmxext); | |||||
| VP8_BILINEAR_MC_FUNC(2, 4, mmxext); | |||||
| #if ARCH_X86_32 | #if ARCH_X86_32 | ||||
| VP8_LUMA_MC_FUNC(0, 16, mmxext); | |||||
| VP8_MC_FUNC(1, 8, mmxext); | |||||
| VP8_BILINEAR_MC_FUNC(0, 16, mmxext); | |||||
| VP8_BILINEAR_MC_FUNC(1, 8, mmxext); | |||||
| c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; | c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; | ||||
| c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; | c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; | ||||
| @@ -378,16 +415,9 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) | |||||
| if (EXTERNAL_SSE(cpu_flags)) { | if (EXTERNAL_SSE(cpu_flags)) { | ||||
| c->vp8_idct_add = ff_vp8_idct_add_sse; | c->vp8_idct_add = ff_vp8_idct_add_sse; | ||||
| c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse; | c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse; | ||||
| c->put_vp8_epel_pixels_tab[0][0][0] = | |||||
| c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse; | |||||
| } | } | ||||
| if (EXTERNAL_SSE2(cpu_flags) && (cpu_flags & AV_CPU_FLAG_SSE2SLOW)) { | if (EXTERNAL_SSE2(cpu_flags) && (cpu_flags & AV_CPU_FLAG_SSE2SLOW)) { | ||||
| VP8_LUMA_MC_FUNC(0, 16, sse2); | |||||
| VP8_MC_FUNC(1, 8, sse2); | |||||
| VP8_BILINEAR_MC_FUNC(0, 16, sse2); | |||||
| VP8_BILINEAR_MC_FUNC(1, 8, sse2); | |||||
| c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2; | c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2; | ||||
| c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2; | c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2; | ||||
| @@ -410,13 +440,6 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) | |||||
| } | } | ||||
| if (EXTERNAL_SSSE3(cpu_flags)) { | if (EXTERNAL_SSSE3(cpu_flags)) { | ||||
| VP8_LUMA_MC_FUNC(0, 16, ssse3); | |||||
| VP8_MC_FUNC(1, 8, ssse3); | |||||
| VP8_MC_FUNC(2, 4, ssse3); | |||||
| VP8_BILINEAR_MC_FUNC(0, 16, ssse3); | |||||
| VP8_BILINEAR_MC_FUNC(1, 8, ssse3); | |||||
| VP8_BILINEAR_MC_FUNC(2, 4, ssse3); | |||||
| c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3; | c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3; | ||||
| c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3; | c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3; | ||||
| @@ -251,6 +251,8 @@ const AVCodecTag ff_codec_bmp_tags[] = { | |||||
| { AV_CODEC_ID_VP6A, MKTAG('V', 'P', '6', 'A') }, | { AV_CODEC_ID_VP6A, MKTAG('V', 'P', '6', 'A') }, | ||||
| { AV_CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') }, | { AV_CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') }, | ||||
| { AV_CODEC_ID_VP6F, MKTAG('F', 'L', 'V', '4') }, | { AV_CODEC_ID_VP6F, MKTAG('F', 'L', 'V', '4') }, | ||||
| { AV_CODEC_ID_VP7, MKTAG('V', 'P', '7', '0') }, | |||||
| { AV_CODEC_ID_VP7, MKTAG('V', 'P', '7', '1') }, | |||||
| { AV_CODEC_ID_VP8, MKTAG('V', 'P', '8', '0') }, | { AV_CODEC_ID_VP8, MKTAG('V', 'P', '8', '0') }, | ||||
| { AV_CODEC_ID_VP9, MKTAG('V', 'P', '9', '0') }, | { AV_CODEC_ID_VP9, MKTAG('V', 'P', '9', '0') }, | ||||
| { AV_CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') }, | { AV_CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') }, | ||||
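Finally, the RIFF tag table gains the two VP7 fourccs so AVI-style containers map them to the new codec id. A small hedged sketch of the lookup path (ff_codec_bmp_tags is the libavformat-internal table being patched here; av_codec_get_id() is the public helper demuxers use):

    #include "libavutil/common.h"      /* MKTAG */
    #include "libavformat/avformat.h"  /* av_codec_get_id() */
    #include "libavformat/riff.h"      /* ff_codec_bmp_tags (internal) */

    static enum AVCodecID vp7_from_fourcc_sketch(void)
    {
        const AVCodecTag *const tables[] = { ff_codec_bmp_tags, NULL };
        /* Both 'VP70' and 'VP71' now resolve to AV_CODEC_ID_VP7. */
        return av_codec_get_id(tables, MKTAG('V', 'P', '7', '0'));
    }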