Signed-off-by: Anton Khirnov <anton@khirnov.net>tags/n2.1
| @@ -1716,14 +1716,8 @@ h264_vaapi_hwaccel_deps="vaapi" | |||||
| h264_vaapi_hwaccel_select="h264_decoder" | h264_vaapi_hwaccel_select="h264_decoder" | ||||
| h264_vda_hwaccel_deps="vda" | h264_vda_hwaccel_deps="vda" | ||||
| h264_vda_hwaccel_select="h264_decoder" | h264_vda_hwaccel_select="h264_decoder" | ||||
| h264_vdpau_decoder_deps="vdpau" | |||||
| h264_vdpau_decoder_select="h264_decoder" | |||||
| h264_vdpau_hwaccel_deps="vdpau" | h264_vdpau_hwaccel_deps="vdpau" | ||||
| h264_vdpau_hwaccel_select="h264_decoder" | h264_vdpau_hwaccel_select="h264_decoder" | ||||
| mpeg_vdpau_decoder_deps="vdpau" | |||||
| mpeg_vdpau_decoder_select="mpeg2video_decoder" | |||||
| mpeg1_vdpau_decoder_deps="vdpau" | |||||
| mpeg1_vdpau_decoder_select="mpeg1video_decoder" | |||||
| mpeg1_vdpau_hwaccel_deps="vdpau" | mpeg1_vdpau_hwaccel_deps="vdpau" | ||||
| mpeg1_vdpau_hwaccel_select="mpeg1video_decoder" | mpeg1_vdpau_hwaccel_select="mpeg1video_decoder" | ||||
| mpeg2_dxva2_hwaccel_deps="dxva2" | mpeg2_dxva2_hwaccel_deps="dxva2" | ||||
| @@ -1734,21 +1728,16 @@ mpeg2_vdpau_hwaccel_deps="vdpau" | |||||
| mpeg2_vdpau_hwaccel_select="mpeg2video_decoder" | mpeg2_vdpau_hwaccel_select="mpeg2video_decoder" | ||||
| mpeg4_vaapi_hwaccel_deps="vaapi" | mpeg4_vaapi_hwaccel_deps="vaapi" | ||||
| mpeg4_vaapi_hwaccel_select="mpeg4_decoder" | mpeg4_vaapi_hwaccel_select="mpeg4_decoder" | ||||
| mpeg4_vdpau_decoder_deps="vdpau" | |||||
| mpeg4_vdpau_decoder_select="mpeg4_decoder" | |||||
| mpeg4_vdpau_hwaccel_deps="vdpau" | mpeg4_vdpau_hwaccel_deps="vdpau" | ||||
| mpeg4_vdpau_hwaccel_select="mpeg4_decoder" | mpeg4_vdpau_hwaccel_select="mpeg4_decoder" | ||||
| vc1_dxva2_hwaccel_deps="dxva2" | vc1_dxva2_hwaccel_deps="dxva2" | ||||
| vc1_dxva2_hwaccel_select="vc1_decoder" | vc1_dxva2_hwaccel_select="vc1_decoder" | ||||
| vc1_vaapi_hwaccel_deps="vaapi" | vc1_vaapi_hwaccel_deps="vaapi" | ||||
| vc1_vaapi_hwaccel_select="vc1_decoder" | vc1_vaapi_hwaccel_select="vc1_decoder" | ||||
| vc1_vdpau_decoder_deps="vdpau" | |||||
| vc1_vdpau_decoder_select="vc1_decoder" | |||||
| vc1_vdpau_hwaccel_deps="vdpau" | vc1_vdpau_hwaccel_deps="vdpau" | ||||
| vc1_vdpau_hwaccel_select="vc1_decoder" | vc1_vdpau_hwaccel_select="vc1_decoder" | ||||
| wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel" | wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel" | ||||
| wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel" | wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel" | ||||
| wmv3_vdpau_decoder_select="vc1_vdpau_decoder" | |||||
| wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel" | wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel" | ||||
| # parsers | # parsers | ||||
| @@ -153,7 +153,6 @@ void avcodec_register_all(void) | |||||
| REGISTER_DECODER(H263I, h263i); | REGISTER_DECODER(H263I, h263i); | ||||
| REGISTER_ENCODER(H263P, h263p); | REGISTER_ENCODER(H263P, h263p); | ||||
| REGISTER_DECODER(H264, h264); | REGISTER_DECODER(H264, h264); | ||||
| REGISTER_DECODER(H264_VDPAU, h264_vdpau); | |||||
| REGISTER_ENCDEC (HUFFYUV, huffyuv); | REGISTER_ENCDEC (HUFFYUV, huffyuv); | ||||
| REGISTER_DECODER(IDCIN, idcin); | REGISTER_DECODER(IDCIN, idcin); | ||||
| REGISTER_DECODER(IFF_BYTERUN1, iff_byterun1); | REGISTER_DECODER(IFF_BYTERUN1, iff_byterun1); | ||||
| @@ -181,9 +180,6 @@ void avcodec_register_all(void) | |||||
| REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video); | REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video); | ||||
| REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video); | REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video); | ||||
| REGISTER_ENCDEC (MPEG4, mpeg4); | REGISTER_ENCDEC (MPEG4, mpeg4); | ||||
| REGISTER_DECODER(MPEG4_VDPAU, mpeg4_vdpau); | |||||
| REGISTER_DECODER(MPEG_VDPAU, mpeg_vdpau); | |||||
| REGISTER_DECODER(MPEG1_VDPAU, mpeg1_vdpau); | |||||
| REGISTER_DECODER(MSA1, msa1); | REGISTER_DECODER(MSA1, msa1); | ||||
| REGISTER_DECODER(MSMPEG4V1, msmpeg4v1); | REGISTER_DECODER(MSMPEG4V1, msmpeg4v1); | ||||
| REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2); | REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2); | ||||
| @@ -246,7 +242,6 @@ void avcodec_register_all(void) | |||||
| REGISTER_DECODER(VB, vb); | REGISTER_DECODER(VB, vb); | ||||
| REGISTER_DECODER(VBLE, vble); | REGISTER_DECODER(VBLE, vble); | ||||
| REGISTER_DECODER(VC1, vc1); | REGISTER_DECODER(VC1, vc1); | ||||
| REGISTER_DECODER(VC1_VDPAU, vc1_vdpau); | |||||
| REGISTER_DECODER(VC1IMAGE, vc1image); | REGISTER_DECODER(VC1IMAGE, vc1image); | ||||
| REGISTER_DECODER(VCR1, vcr1); | REGISTER_DECODER(VCR1, vcr1); | ||||
| REGISTER_DECODER(VMDVIDEO, vmdvideo); | REGISTER_DECODER(VMDVIDEO, vmdvideo); | ||||
| @@ -261,7 +256,6 @@ void avcodec_register_all(void) | |||||
| REGISTER_ENCDEC (WMV1, wmv1); | REGISTER_ENCDEC (WMV1, wmv1); | ||||
| REGISTER_ENCDEC (WMV2, wmv2); | REGISTER_ENCDEC (WMV2, wmv2); | ||||
| REGISTER_DECODER(WMV3, wmv3); | REGISTER_DECODER(WMV3, wmv3); | ||||
| REGISTER_DECODER(WMV3_VDPAU, wmv3_vdpau); | |||||
| REGISTER_DECODER(WMV3IMAGE, wmv3image); | REGISTER_DECODER(WMV3IMAGE, wmv3image); | ||||
| REGISTER_DECODER(WNV1, wnv1); | REGISTER_DECODER(WNV1, wnv1); | ||||
| REGISTER_DECODER(XAN_WC3, xan_wc3); | REGISTER_DECODER(XAN_WC3, xan_wc3); | ||||
| @@ -824,7 +824,6 @@ void ff_er_frame_end(ERContext *s) | |||||
| * though it should not crash if enabled. */ | * though it should not crash if enabled. */ | ||||
| if (!s->avctx->err_recognition || s->error_count == 0 || | if (!s->avctx->err_recognition || s->error_count == 0 || | ||||
| s->avctx->hwaccel || | s->avctx->hwaccel || | ||||
| s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU || | |||||
| !s->cur_pic || s->cur_pic->field_picture || | !s->cur_pic || s->cur_pic->field_picture || | ||||
| s->error_count == 3 * s->mb_width * | s->error_count == 3 * s->mb_width * | ||||
| (s->avctx->skip_top + s->avctx->skip_bottom)) { | (s->avctx->skip_top + s->avctx->skip_bottom)) { | ||||
| @@ -34,7 +34,6 @@ | |||||
| #include "h263_parser.h" | #include "h263_parser.h" | ||||
| #include "mpeg4video_parser.h" | #include "mpeg4video_parser.h" | ||||
| #include "msmpeg4.h" | #include "msmpeg4.h" | ||||
| #include "vdpau_internal.h" | |||||
| #include "thread.h" | #include "thread.h" | ||||
| #include "flv.h" | #include "flv.h" | ||||
| #include "mpeg4video.h" | #include "mpeg4video.h" | ||||
| @@ -624,11 +623,6 @@ retry: | |||||
| if (!s->divx_packed && !avctx->hwaccel) | if (!s->divx_packed && !avctx->hwaccel) | ||||
| ff_thread_finish_setup(avctx); | ff_thread_finish_setup(avctx); | ||||
| if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)) { | |||||
| ff_vdpau_mpeg4_decode_picture(s, s->gb.buffer, s->gb.buffer_end - s->gb.buffer); | |||||
| goto frame_end; | |||||
| } | |||||
| if (avctx->hwaccel) { | if (avctx->hwaccel) { | ||||
| if (avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer) < 0) | if (avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer) < 0) | ||||
| return -1; | return -1; | ||||
| @@ -673,7 +667,6 @@ retry: | |||||
| } | } | ||||
| assert(s->bitstream_buffer_size==0); | assert(s->bitstream_buffer_size==0); | ||||
| frame_end: | |||||
| /* divx 5.01+ bistream reorder stuff */ | /* divx 5.01+ bistream reorder stuff */ | ||||
| if(s->codec_id==AV_CODEC_ID_MPEG4 && s->divx_packed){ | if(s->codec_id==AV_CODEC_ID_MPEG4 && s->divx_packed){ | ||||
| int current_pos= get_bits_count(&s->gb)>>3; | int current_pos= get_bits_count(&s->gb)>>3; | ||||
| @@ -43,7 +43,6 @@ | |||||
| #include "rectangle.h" | #include "rectangle.h" | ||||
| #include "svq3.h" | #include "svq3.h" | ||||
| #include "thread.h" | #include "thread.h" | ||||
| #include "vdpau_internal.h" | |||||
| // #undef NDEBUG | // #undef NDEBUG | ||||
| #include <assert.h> | #include <assert.h> | ||||
| @@ -2769,10 +2768,6 @@ static int field_end(H264Context *h, int in_setup) | |||||
| ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, | ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, | ||||
| h->picture_structure == PICT_BOTTOM_FIELD); | h->picture_structure == PICT_BOTTOM_FIELD); | ||||
| if (CONFIG_H264_VDPAU_DECODER && | |||||
| h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) | |||||
| ff_vdpau_h264_set_reference_frames(h); | |||||
| if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { | if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { | ||||
| if (!h->droppable) { | if (!h->droppable) { | ||||
| err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); | err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); | ||||
| @@ -2790,10 +2785,6 @@ static int field_end(H264Context *h, int in_setup) | |||||
| "hardware accelerator failed to decode picture\n"); | "hardware accelerator failed to decode picture\n"); | ||||
| } | } | ||||
| if (CONFIG_H264_VDPAU_DECODER && | |||||
| h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) | |||||
| ff_vdpau_h264_picture_complete(h); | |||||
| /* | /* | ||||
| * FIXME: Error handling code does not seem to support interlaced | * FIXME: Error handling code does not seem to support interlaced | ||||
| * when slices span multiple rows | * when slices span multiple rows | ||||
| @@ -2897,13 +2888,6 @@ static int h264_set_parameter_from_sps(H264Context *h) | |||||
| if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || | if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || | ||||
| h->cur_chroma_format_idc != h->sps.chroma_format_idc) { | h->cur_chroma_format_idc != h->sps.chroma_format_idc) { | ||||
| if (h->avctx->codec && | |||||
| h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU && | |||||
| (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) { | |||||
| av_log(h->avctx, AV_LOG_ERROR, | |||||
| "VDPAU decoding does not support video colorspace.\n"); | |||||
| return AVERROR_INVALIDDATA; | |||||
| } | |||||
| if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) { | if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) { | ||||
| h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; | h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; | ||||
| h->cur_chroma_format_idc = h->sps.chroma_format_idc; | h->cur_chroma_format_idc = h->sps.chroma_format_idc; | ||||
| @@ -4310,8 +4294,7 @@ static int execute_decode_slices(H264Context *h, int context_count) | |||||
| H264Context *hx; | H264Context *hx; | ||||
| int i; | int i; | ||||
| if (h->avctx->hwaccel || | |||||
| h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) | |||||
| if (h->avctx->hwaccel) | |||||
| return 0; | return 0; | ||||
| if (context_count == 1) { | if (context_count == 1) { | ||||
| return decode_slice(avctx, &h); | return decode_slice(avctx, &h); | ||||
| @@ -4503,9 +4486,6 @@ again: | |||||
| if (h->avctx->hwaccel && | if (h->avctx->hwaccel && | ||||
| h->avctx->hwaccel->start_frame(h->avctx, NULL, 0) < 0) | h->avctx->hwaccel->start_frame(h->avctx, NULL, 0) < 0) | ||||
| return -1; | return -1; | ||||
| if (CONFIG_H264_VDPAU_DECODER && | |||||
| h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) | |||||
| ff_vdpau_h264_picture_start(h); | |||||
| } | } | ||||
| if (hx->redundant_pic_count == 0 && | if (hx->redundant_pic_count == 0 && | ||||
| @@ -4521,14 +4501,6 @@ again: | |||||
| &buf[buf_index - consumed], | &buf[buf_index - consumed], | ||||
| consumed) < 0) | consumed) < 0) | ||||
| return -1; | return -1; | ||||
| } else if (CONFIG_H264_VDPAU_DECODER && | |||||
| h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) { | |||||
| ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], | |||||
| start_code, | |||||
| sizeof(start_code)); | |||||
| ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], | |||||
| &buf[buf_index - consumed], | |||||
| consumed); | |||||
| } else | } else | ||||
| context_count++; | context_count++; | ||||
| } | } | ||||
| @@ -4809,21 +4781,3 @@ AVCodec ff_h264_decoder = { | |||||
| .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context), | .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context), | ||||
| .profiles = NULL_IF_CONFIG_SMALL(profiles), | .profiles = NULL_IF_CONFIG_SMALL(profiles), | ||||
| }; | }; | ||||
| #if CONFIG_H264_VDPAU_DECODER | |||||
| AVCodec ff_h264_vdpau_decoder = { | |||||
| .name = "h264_vdpau", | |||||
| .type = AVMEDIA_TYPE_VIDEO, | |||||
| .id = AV_CODEC_ID_H264, | |||||
| .priv_data_size = sizeof(H264Context), | |||||
| .init = ff_h264_decode_init, | |||||
| .close = h264_decode_end, | |||||
| .decode = decode_frame, | |||||
| .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, | |||||
| .flush = flush_dpb, | |||||
| .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"), | |||||
| .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264, | |||||
| AV_PIX_FMT_NONE}, | |||||
| .profiles = NULL_IF_CONFIG_SMALL(profiles), | |||||
| }; | |||||
| #endif | |||||
| @@ -35,7 +35,6 @@ | |||||
| #include "mpeg12data.h" | #include "mpeg12data.h" | ||||
| #include "mpeg12decdata.h" | #include "mpeg12decdata.h" | ||||
| #include "bytestream.h" | #include "bytestream.h" | ||||
| #include "vdpau_internal.h" | |||||
| #include "xvmc_internal.h" | #include "xvmc_internal.h" | ||||
| #include "thread.h" | #include "thread.h" | ||||
| @@ -36,7 +36,6 @@ | |||||
| #include "mpeg12data.h" | #include "mpeg12data.h" | ||||
| #include "mpeg12decdata.h" | #include "mpeg12decdata.h" | ||||
| #include "bytestream.h" | #include "bytestream.h" | ||||
| #include "vdpau_internal.h" | |||||
| #include "xvmc_internal.h" | #include "xvmc_internal.h" | ||||
| #include "thread.h" | #include "thread.h" | ||||
| @@ -1093,12 +1092,7 @@ static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx) | |||||
| if (avctx->xvmc_acceleration) | if (avctx->xvmc_acceleration) | ||||
| return avctx->get_format(avctx, pixfmt_xvmc_mpg2_420); | return avctx->get_format(avctx, pixfmt_xvmc_mpg2_420); | ||||
| else if (avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) { | |||||
| if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) | |||||
| return AV_PIX_FMT_VDPAU_MPEG1; | |||||
| else | |||||
| return AV_PIX_FMT_VDPAU_MPEG2; | |||||
| } else { | |||||
| else { | |||||
| if (s->chroma_format < 2) | if (s->chroma_format < 2) | ||||
| return avctx->get_format(avctx, mpeg12_hwaccel_pixfmt_list_420); | return avctx->get_format(avctx, mpeg12_hwaccel_pixfmt_list_420); | ||||
| else if (s->chroma_format == 2) | else if (s->chroma_format == 2) | ||||
| @@ -1200,8 +1194,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx) | |||||
| avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt); | avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt); | ||||
| // until then pix_fmt may be changed right after codec init | // until then pix_fmt may be changed right after codec init | ||||
| if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT || | if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT || | ||||
| avctx->hwaccel || | |||||
| s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) | |||||
| avctx->hwaccel) | |||||
| if (avctx->idct_algo == FF_IDCT_AUTO) | if (avctx->idct_algo == FF_IDCT_AUTO) | ||||
| avctx->idct_algo = FF_IDCT_SIMPLE; | avctx->idct_algo = FF_IDCT_SIMPLE; | ||||
| @@ -1961,8 +1954,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx) | |||||
| avctx->pix_fmt = mpeg_get_pixelformat(avctx); | avctx->pix_fmt = mpeg_get_pixelformat(avctx); | ||||
| avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt); | avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt); | ||||
| if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel || | |||||
| s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) | |||||
| if (avctx->pix_fmt == AV_PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel) | |||||
| if (avctx->idct_algo == FF_IDCT_AUTO) | if (avctx->idct_algo == FF_IDCT_AUTO) | ||||
| avctx->idct_algo = FF_IDCT_SIMPLE; | avctx->idct_algo = FF_IDCT_SIMPLE; | ||||
| @@ -2076,10 +2068,6 @@ static int decode_chunks(AVCodecContext *avctx, | |||||
| s2->er.error_count += s2->thread_context[i]->er.error_count; | s2->er.error_count += s2->thread_context[i]->er.error_count; | ||||
| } | } | ||||
| if ((CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER) | |||||
| && avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) | |||||
| ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count); | |||||
| ret = slice_end(avctx, picture); | ret = slice_end(avctx, picture); | ||||
| if (ret < 0) | if (ret < 0) | ||||
| return ret; | return ret; | ||||
| @@ -2260,11 +2248,6 @@ static int decode_chunks(AVCodecContext *avctx, | |||||
| return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
| } | } | ||||
| if (avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) { | |||||
| s->slice_count++; | |||||
| break; | |||||
| } | |||||
| if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && | if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && | ||||
| !avctx->hwaccel) { | !avctx->hwaccel) { | ||||
| int threshold = (s2->mb_height * s->slice_count + | int threshold = (s2->mb_height * s->slice_count + | ||||
| @@ -2450,35 +2433,3 @@ AVCodec ff_mpeg_xvmc_decoder = { | |||||
| }; | }; | ||||
| #endif | #endif | ||||
| #if CONFIG_MPEG_VDPAU_DECODER | |||||
| AVCodec ff_mpeg_vdpau_decoder = { | |||||
| .name = "mpegvideo_vdpau", | |||||
| .type = AVMEDIA_TYPE_VIDEO, | |||||
| .id = AV_CODEC_ID_MPEG2VIDEO, | |||||
| .priv_data_size = sizeof(Mpeg1Context), | |||||
| .init = mpeg_decode_init, | |||||
| .close = mpeg_decode_end, | |||||
| .decode = mpeg_decode_frame, | |||||
| .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | | |||||
| CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, | |||||
| .flush = flush, | |||||
| .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"), | |||||
| }; | |||||
| #endif | |||||
| #if CONFIG_MPEG1_VDPAU_DECODER | |||||
| AVCodec ff_mpeg1_vdpau_decoder = { | |||||
| .name = "mpeg1video_vdpau", | |||||
| .type = AVMEDIA_TYPE_VIDEO, | |||||
| .id = AV_CODEC_ID_MPEG1VIDEO, | |||||
| .priv_data_size = sizeof(Mpeg1Context), | |||||
| .init = mpeg_decode_init, | |||||
| .close = mpeg_decode_end, | |||||
| .decode = mpeg_decode_frame, | |||||
| .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | | |||||
| CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY, | |||||
| .flush = flush, | |||||
| .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"), | |||||
| }; | |||||
| #endif | |||||
| @@ -2278,21 +2278,3 @@ AVCodec ff_mpeg4_decoder = { | |||||
| .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles), | .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles), | ||||
| .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context), | .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context), | ||||
| }; | }; | ||||
| #if CONFIG_MPEG4_VDPAU_DECODER | |||||
| AVCodec ff_mpeg4_vdpau_decoder = { | |||||
| .name = "mpeg4_vdpau", | |||||
| .type = AVMEDIA_TYPE_VIDEO, | |||||
| .id = AV_CODEC_ID_MPEG4, | |||||
| .priv_data_size = sizeof(MpegEncContext), | |||||
| .init = decode_init, | |||||
| .close = ff_h263_decode_end, | |||||
| .decode = ff_h263_decode_frame, | |||||
| .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | | |||||
| CODEC_CAP_HWACCEL_VDPAU, | |||||
| .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"), | |||||
| .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_MPEG4, | |||||
| AV_PIX_FMT_NONE }, | |||||
| }; | |||||
| #endif | |||||
| @@ -1688,7 +1688,6 @@ void ff_MPV_frame_end(MpegEncContext *s) | |||||
| ff_xvmc_field_end(s); | ff_xvmc_field_end(s); | ||||
| } else if ((s->er.error_count || s->encoding) && | } else if ((s->er.error_count || s->encoding) && | ||||
| !s->avctx->hwaccel && | !s->avctx->hwaccel && | ||||
| !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) && | |||||
| s->unrestricted_mv && | s->unrestricted_mv && | ||||
| s->current_picture.reference && | s->current_picture.reference && | ||||
| !s->intra_only && | !s->intra_only && | ||||
| @@ -2212,7 +2211,6 @@ void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur, | |||||
| } | } | ||||
| if (!avctx->hwaccel && | if (!avctx->hwaccel && | ||||
| !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) && | |||||
| draw_edges && | draw_edges && | ||||
| cur->reference && | cur->reference && | ||||
| !(avctx->flags & CODEC_FLAG_EMU_EDGE)) { | !(avctx->flags & CODEC_FLAG_EMU_EDGE)) { | ||||
| @@ -52,7 +52,6 @@ | |||||
| #include "golomb.h" | #include "golomb.h" | ||||
| #include "hpeldsp.h" | #include "hpeldsp.h" | ||||
| #include "rectangle.h" | #include "rectangle.h" | ||||
| #include "vdpau_internal.h" | |||||
| #if CONFIG_ZLIB | #if CONFIG_ZLIB | ||||
| #include <zlib.h> | #include <zlib.h> | ||||
| @@ -38,7 +38,6 @@ | |||||
| #include "msmpeg4data.h" | #include "msmpeg4data.h" | ||||
| #include "unary.h" | #include "unary.h" | ||||
| #include "mathops.h" | #include "mathops.h" | ||||
| #include "vdpau_internal.h" | |||||
| #undef NDEBUG | #undef NDEBUG | ||||
| #include <assert.h> | #include <assert.h> | ||||
| @@ -5732,13 +5731,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, | |||||
| return 0; | return 0; | ||||
| } | } | ||||
| if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) { | |||||
| if (v->profile < PROFILE_ADVANCED) | |||||
| avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3; | |||||
| else | |||||
| avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1; | |||||
| } | |||||
| //for advanced profile we may need to parse and unescape data | //for advanced profile we may need to parse and unescape data | ||||
| if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { | if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { | ||||
| int buf_size2 = 0; | int buf_size2 = 0; | ||||
| @@ -5755,8 +5747,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, | |||||
| if (size <= 0) continue; | if (size <= 0) continue; | ||||
| switch (AV_RB32(start)) { | switch (AV_RB32(start)) { | ||||
| case VC1_CODE_FRAME: | case VC1_CODE_FRAME: | ||||
| if (avctx->hwaccel || | |||||
| s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) | |||||
| if (avctx->hwaccel) | |||||
| buf_start = start; | buf_start = start; | ||||
| buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); | buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); | ||||
| break; | break; | ||||
| @@ -5940,10 +5931,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, | |||||
| s->me.qpel_put = s->dsp.put_qpel_pixels_tab; | s->me.qpel_put = s->dsp.put_qpel_pixels_tab; | ||||
| s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; | s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; | ||||
| if ((CONFIG_VC1_VDPAU_DECODER) | |||||
| &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) | |||||
| ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start); | |||||
| else if (avctx->hwaccel) { | |||||
| if (avctx->hwaccel) { | |||||
| if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0) | if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0) | ||||
| goto err; | goto err; | ||||
| if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0) | if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0) | ||||
| @@ -6123,38 +6111,6 @@ AVCodec ff_wmv3_decoder = { | |||||
| }; | }; | ||||
| #endif | #endif | ||||
| #if CONFIG_WMV3_VDPAU_DECODER | |||||
| AVCodec ff_wmv3_vdpau_decoder = { | |||||
| .name = "wmv3_vdpau", | |||||
| .type = AVMEDIA_TYPE_VIDEO, | |||||
| .id = AV_CODEC_ID_WMV3, | |||||
| .priv_data_size = sizeof(VC1Context), | |||||
| .init = vc1_decode_init, | |||||
| .close = ff_vc1_decode_end, | |||||
| .decode = vc1_decode_frame, | |||||
| .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, | |||||
| .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"), | |||||
| .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE }, | |||||
| .profiles = NULL_IF_CONFIG_SMALL(profiles) | |||||
| }; | |||||
| #endif | |||||
| #if CONFIG_VC1_VDPAU_DECODER | |||||
| AVCodec ff_vc1_vdpau_decoder = { | |||||
| .name = "vc1_vdpau", | |||||
| .type = AVMEDIA_TYPE_VIDEO, | |||||
| .id = AV_CODEC_ID_VC1, | |||||
| .priv_data_size = sizeof(VC1Context), | |||||
| .init = vc1_decode_init, | |||||
| .close = ff_vc1_decode_end, | |||||
| .decode = vc1_decode_frame, | |||||
| .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, | |||||
| .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"), | |||||
| .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE }, | |||||
| .profiles = NULL_IF_CONFIG_SMALL(profiles) | |||||
| }; | |||||
| #endif | |||||
| #if CONFIG_WMV3IMAGE_DECODER | #if CONFIG_WMV3IMAGE_DECODER | ||||
| AVCodec ff_wmv3image_decoder = { | AVCodec ff_wmv3image_decoder = { | ||||
| .name = "wmv3image", | .name = "wmv3image", | ||||
| @@ -87,340 +87,4 @@ int ff_vdpau_add_buffer(AVCodecContext *avctx, | |||||
| return 0; | return 0; | ||||
| } | } | ||||
| /* Obsolete non-hwaccel VDPAU support below... */ | |||||
| void ff_vdpau_h264_set_reference_frames(H264Context *h) | |||||
| { | |||||
| struct vdpau_render_state *render, *render_ref; | |||||
| VdpReferenceFrameH264 *rf, *rf2; | |||||
| Picture *pic; | |||||
| int i, list, pic_frame_idx; | |||||
| render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; | |||||
| assert(render); | |||||
| rf = &render->info.h264.referenceFrames[0]; | |||||
| #define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames) | |||||
| for (list = 0; list < 2; ++list) { | |||||
| Picture **lp = list ? h->long_ref : h->short_ref; | |||||
| int ls = list ? 16 : h->short_ref_count; | |||||
| for (i = 0; i < ls; ++i) { | |||||
| pic = lp[i]; | |||||
| if (!pic || !pic->reference) | |||||
| continue; | |||||
| pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num; | |||||
| render_ref = (struct vdpau_render_state *)pic->f.data[0]; | |||||
| assert(render_ref); | |||||
| rf2 = &render->info.h264.referenceFrames[0]; | |||||
| while (rf2 != rf) { | |||||
| if ( | |||||
| (rf2->surface == render_ref->surface) | |||||
| && (rf2->is_long_term == pic->long_ref) | |||||
| && (rf2->frame_idx == pic_frame_idx) | |||||
| ) | |||||
| break; | |||||
| ++rf2; | |||||
| } | |||||
| if (rf2 != rf) { | |||||
| rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; | |||||
| rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; | |||||
| continue; | |||||
| } | |||||
| if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT]) | |||||
| continue; | |||||
| rf->surface = render_ref->surface; | |||||
| rf->is_long_term = pic->long_ref; | |||||
| rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; | |||||
| rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; | |||||
| rf->field_order_cnt[0] = pic->field_poc[0]; | |||||
| rf->field_order_cnt[1] = pic->field_poc[1]; | |||||
| rf->frame_idx = pic_frame_idx; | |||||
| ++rf; | |||||
| } | |||||
| } | |||||
| for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) { | |||||
| rf->surface = VDP_INVALID_HANDLE; | |||||
| rf->is_long_term = 0; | |||||
| rf->top_is_reference = 0; | |||||
| rf->bottom_is_reference = 0; | |||||
| rf->field_order_cnt[0] = 0; | |||||
| rf->field_order_cnt[1] = 0; | |||||
| rf->frame_idx = 0; | |||||
| } | |||||
| } | |||||
| void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size) | |||||
| { | |||||
| struct vdpau_render_state *render = (struct vdpau_render_state*)data; | |||||
| assert(render); | |||||
| render->bitstream_buffers= av_fast_realloc( | |||||
| render->bitstream_buffers, | |||||
| &render->bitstream_buffers_allocated, | |||||
| sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1) | |||||
| ); | |||||
| render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION; | |||||
| render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf; | |||||
| render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size; | |||||
| render->bitstream_buffers_used++; | |||||
| } | |||||
| #if CONFIG_H264_VDPAU_DECODER | |||||
| void ff_vdpau_h264_picture_start(H264Context *h) | |||||
| { | |||||
| struct vdpau_render_state *render; | |||||
| int i; | |||||
| render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; | |||||
| assert(render); | |||||
| for (i = 0; i < 2; ++i) { | |||||
| int foc = h->cur_pic_ptr->field_poc[i]; | |||||
| if (foc == INT_MAX) | |||||
| foc = 0; | |||||
| render->info.h264.field_order_cnt[i] = foc; | |||||
| } | |||||
| render->info.h264.frame_num = h->frame_num; | |||||
| } | |||||
| void ff_vdpau_h264_picture_complete(H264Context *h) | |||||
| { | |||||
| struct vdpau_render_state *render; | |||||
| render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; | |||||
| assert(render); | |||||
| render->info.h264.slice_count = h->slice_num; | |||||
| if (render->info.h264.slice_count < 1) | |||||
| return; | |||||
| render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE; | |||||
| render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME; | |||||
| render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD; | |||||
| render->info.h264.num_ref_frames = h->sps.ref_frame_count; | |||||
| render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag; | |||||
| render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred; | |||||
| render->info.h264.weighted_pred_flag = h->pps.weighted_pred; | |||||
| render->info.h264.weighted_bipred_idc = h->pps.weighted_bipred_idc; | |||||
| render->info.h264.frame_mbs_only_flag = h->sps.frame_mbs_only_flag; | |||||
| render->info.h264.transform_8x8_mode_flag = h->pps.transform_8x8_mode; | |||||
| render->info.h264.chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; | |||||
| render->info.h264.second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; | |||||
| render->info.h264.pic_init_qp_minus26 = h->pps.init_qp - 26; | |||||
| render->info.h264.num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1; | |||||
| render->info.h264.num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1; | |||||
| render->info.h264.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; | |||||
| render->info.h264.pic_order_cnt_type = h->sps.poc_type; | |||||
| render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4; | |||||
| render->info.h264.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; | |||||
| render->info.h264.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag; | |||||
| render->info.h264.entropy_coding_mode_flag = h->pps.cabac; | |||||
| render->info.h264.pic_order_present_flag = h->pps.pic_order_present; | |||||
| render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; | |||||
| render->info.h264.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present; | |||||
| memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4)); | |||||
| memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0])); | |||||
| memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0])); | |||||
| ff_h264_draw_horiz_band(h, 0, h->avctx->height); | |||||
| render->bitstream_buffers_used = 0; | |||||
| } | |||||
| #endif /* CONFIG_H264_VDPAU_DECODER */ | |||||
#if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
/* Complete one MPEG-1/2 picture for VDPAU: populate the VdpPictureInfoMPEG1Or2
 * attached to the current picture, queue the bitstream chunk and trigger
 * rendering through the draw_horiz_band callback. */
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
                                    int buf_size, int slice_count)
{
    struct vdpau_render_state *state, *ref;
    int n;

    if (!s->current_picture_ptr)
        return;

    state = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
    assert(state);

    /* fill VdpPictureInfoMPEG1Or2 struct */
    state->info.mpeg.picture_structure          = s->picture_structure;
    state->info.mpeg.picture_coding_type        = s->pict_type;
    state->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
    state->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
    state->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
    state->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
    state->info.mpeg.alternate_scan             = s->alternate_scan;
    state->info.mpeg.q_scale_type               = s->q_scale_type;
    state->info.mpeg.top_field_first            = s->top_field_first;
    /* full_pel vectors are MPEG-1 only; they remain 0 for MPEG-2 */
    state->info.mpeg.full_pel_forward_vector    = s->full_pel[0];
    state->info.mpeg.full_pel_backward_vector   = s->full_pel[1];
    /* for MPEG-1 both the horizontal and vertical codes are filled */
    state->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0];
    state->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1];
    state->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0];
    state->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1];

    for (n = 0; n < 64; n++) {
        state->info.mpeg.intra_quantizer_matrix[n]     = s->intra_matrix[n];
        state->info.mpeg.non_intra_quantizer_matrix[n] = s->inter_matrix[n];
    }

    state->info.mpeg.forward_reference  = VDP_INVALID_HANDLE;
    state->info.mpeg.backward_reference = VDP_INVALID_HANDLE;

    /* B-frames reference the next picture backwards ... */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        ref = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(ref);
        state->info.mpeg.backward_reference = ref->surface;
    }
    /* ... and both B- and P-frames reference the last picture forwards. */
    if (s->pict_type == AV_PICTURE_TYPE_B || s->pict_type == AV_PICTURE_TYPE_P) {
        ref = (struct vdpau_render_state *)s->last_picture.f.data[0];
        if (!ref)        // FIXME: Does this test make sense?
            ref = state; // predict second field from the first
        state->info.mpeg.forward_reference = ref->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    state->info.mpeg.slice_count = slice_count;

    if (slice_count)
        ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    state->bitstream_buffers_used = 0;
}
#endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
#if CONFIG_VC1_VDPAU_DECODER
/**
 * Complete one VC-1 picture for VDPAU: fill the VdpPictureInfoVC1 attached
 * to the current picture, queue the bitstream chunk and trigger rendering
 * through the draw_horiz_band callback.
 */
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                 int buf_size)
{
    VC1Context *v = s->avctx->priv_data;
    struct vdpau_render_state *render, *last, *next;

    /* Guard against a missing current picture, matching the MPEG-1/2 and
     * MPEG-4 code paths: ff_vdpau_add_data_chunk() below dereferences
     * current_picture_ptr unconditionally. */
    if (!s->current_picture_ptr)
        return;

    render = (struct vdpau_render_state *)s->current_picture.f.data[0];
    assert(render);

    /* fill VdpPictureInfoVC1 struct */
    render->info.vc1.frame_coding_mode  = v->fcm;
    render->info.vc1.postprocflag       = v->postprocflag;
    render->info.vc1.pulldown           = v->broadcast;
    render->info.vc1.interlace          = v->interlace;
    render->info.vc1.tfcntrflag        = v->tfcntrflag;
    render->info.vc1.finterpflag       = v->finterpflag;
    render->info.vc1.psf               = v->psf;
    render->info.vc1.dquant            = v->dquant;
    render->info.vc1.panscan_flag      = v->panscanflag;
    render->info.vc1.refdist_flag      = v->refdist_flag;
    render->info.vc1.quantizer         = v->quantizer_mode;
    render->info.vc1.extended_mv       = v->extended_mv;
    render->info.vc1.extended_dmv      = v->extended_dmv;
    render->info.vc1.overlap           = v->overlap;
    render->info.vc1.vstransform      = v->vstransform;
    render->info.vc1.loopfilter       = v->s.loop_filter;
    render->info.vc1.fastuvmc         = v->fastuvmc;
    render->info.vc1.range_mapy_flag  = v->range_mapy_flag;
    render->info.vc1.range_mapy       = v->range_mapy;
    render->info.vc1.range_mapuv_flag = v->range_mapuv_flag;
    render->info.vc1.range_mapuv      = v->range_mapuv;
    /* Specific to simple/main profile only */
    render->info.vc1.multires         = v->multires;
    render->info.vc1.syncmarker       = v->s.resync_marker;
    render->info.vc1.rangered         = v->rangered | (v->rangeredfrm << 1);
    render->info.vc1.maxbframes       = v->s.max_b_frames;
    render->info.vc1.deblockEnable    = v->postprocflag & 1;
    render->info.vc1.pquant           = v->pq;

    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;

    if (v->bi_type)
        render->info.vc1.picture_type = 4; /* BI frame */
    else
        /* map AV_PICTURE_TYPE_{I,P,B} (1,2,3) onto the VDPAU enumeration */
        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(next);
        render->info.vc1.backward_reference = next->surface;
        /* fall through: B-frames also set the forward prediction below */
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.vc1.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    render->info.vc1.slice_count = 1;

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}
#endif /* CONFIG_VC1_VDPAU_DECODER */
#if CONFIG_MPEG4_VDPAU_DECODER
/* Complete one MPEG-4 part 2 picture for VDPAU: populate the
 * VdpPictureInfoMPEG4Part2 attached to the current picture, queue the
 * bitstream chunk and trigger rendering via the draw_horiz_band callback. */
void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                   int buf_size)
{
    struct vdpau_render_state *state, *ref;
    int n;

    if (!s->current_picture_ptr)
        return;

    state = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
    assert(state);

    /* fill VdpPictureInfoMPEG4Part2 struct */
    state->info.mpeg4.trd[0]                        = s->pp_time;
    state->info.mpeg4.trb[0]                        = s->pb_time;
    state->info.mpeg4.trd[1]                        = s->pp_field_time >> 1;
    state->info.mpeg4.trb[1]                        = s->pb_field_time >> 1;
    state->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
    state->info.mpeg4.vop_coding_type               = 0; /* overwritten for B-frames below */
    state->info.mpeg4.vop_fcode_forward             = s->f_code;
    state->info.mpeg4.vop_fcode_backward            = s->b_code;
    state->info.mpeg4.resync_marker_disable         = !s->resync_marker;
    state->info.mpeg4.interlaced                    = !s->progressive_sequence;
    state->info.mpeg4.quant_type                    = s->mpeg_quant;
    state->info.mpeg4.quarter_sample                = s->quarter_sample;
    state->info.mpeg4.short_video_header            = s->avctx->codec->id == AV_CODEC_ID_H263;
    state->info.mpeg4.rounding_control              = s->no_rounding;
    state->info.mpeg4.alternate_vertical_scan_flag  = s->alternate_scan;
    state->info.mpeg4.top_field_first               = s->top_field_first;

    for (n = 0; n < 64; n++) {
        state->info.mpeg4.intra_quantizer_matrix[n]     = s->intra_matrix[n];
        state->info.mpeg4.non_intra_quantizer_matrix[n] = s->inter_matrix[n];
    }

    state->info.mpeg4.forward_reference  = VDP_INVALID_HANDLE;
    state->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;

    /* B-frames reference the next picture backwards ... */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        ref = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(ref);
        state->info.mpeg4.backward_reference = ref->surface;
        state->info.mpeg4.vop_coding_type    = 2;
    }
    /* ... and both B- and P-frames reference the last picture forwards. */
    if (s->pict_type == AV_PICTURE_TYPE_B || s->pict_type == AV_PICTURE_TYPE_P) {
        ref = (struct vdpau_render_state *)s->last_picture.f.data[0];
        assert(ref);
        state->info.mpeg4.forward_reference = ref->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    state->bitstream_buffers_used = 0;
}
#endif /* CONFIG_MPEG4_VDPAU_DECODER */
/* @}*/
| @@ -40,21 +40,4 @@ int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx); | |||||
| int ff_vdpau_add_buffer(AVCodecContext *avctx, | int ff_vdpau_add_buffer(AVCodecContext *avctx, | ||||
| const uint8_t *buf, uint32_t buf_size); | const uint8_t *buf, uint32_t buf_size); | ||||
| void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, | |||||
| int buf_size); | |||||
| void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, | |||||
| int buf_size, int slice_count); | |||||
| void ff_vdpau_h264_picture_start(H264Context *h); | |||||
| void ff_vdpau_h264_set_reference_frames(H264Context *h); | |||||
| void ff_vdpau_h264_picture_complete(H264Context *h); | |||||
| void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, | |||||
| int buf_size); | |||||
| void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf, | |||||
| int buf_size); | |||||
| #endif /* AVCODEC_VDPAU_INTERNAL_H */ | #endif /* AVCODEC_VDPAU_INTERNAL_H */ | ||||