* commit '7c6eb0a1b7bf1aac7f033a7ec6d8cacc3b5c2615':
  lavc: AV-prefix all codec flags

Conflicts:
    doc/examples/muxing.c
    ffmpeg.c
    ffmpeg_opt.c
    ffplay.c
    libavcodec/aacdec.c
    libavcodec/aacenc.c
    libavcodec/ac3dec.c
    libavcodec/ac3enc_float.c
    libavcodec/atrac1.c
    libavcodec/atrac3.c
    libavcodec/atrac3plusdec.c
    libavcodec/dcadec.c
    libavcodec/ffv1enc.c
    libavcodec/h264.c
    libavcodec/h264_loopfilter.c
    libavcodec/h264_mb.c
    libavcodec/imc.c
    libavcodec/libmp3lame.c
    libavcodec/libtheoraenc.c
    libavcodec/libtwolame.c
    libavcodec/libvpxenc.c
    libavcodec/libxavs.c
    libavcodec/libxvid.c
    libavcodec/mpeg12dec.c
    libavcodec/mpeg12enc.c
    libavcodec/mpegaudiodec_template.c
    libavcodec/mpegvideo.c
    libavcodec/mpegvideo_enc.c
    libavcodec/mpegvideo_motion.c
    libavcodec/nellymoserdec.c
    libavcodec/nellymoserenc.c
    libavcodec/nvenc.c
    libavcodec/on2avc.c
    libavcodec/options_table.h
    libavcodec/opus_celt.c
    libavcodec/pngenc.c
    libavcodec/ra288.c
    libavcodec/ratecontrol.c
    libavcodec/twinvq.c
    libavcodec/vc1_block.c
    libavcodec/vc1_loopfilter.c
    libavcodec/vc1_mc.c
    libavcodec/vc1dec.c
    libavcodec/vorbisdec.c
    libavcodec/vp3.c
    libavcodec/wma.c
    libavcodec/wmaprodec.c
    libavcodec/x86/hpeldsp_init.c
    libavcodec/x86/me_cmp_init.c

Merged-by: Michael Niedermayer <michael@niedermayer.cc>
(tag: n2.8)
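The rename itself is mechanical: every CODEC_FLAG_* and CODEC_FLAG2_* constant gains an AV_ prefix and call sites are updated accordingly. A minimal before/after sketch of the pattern (c stands in for any AVCodecContext; the real hunks follow below):

    /* before the merge */
    c->flags  |= CODEC_FLAG_GLOBAL_HEADER;
    c->flags2 |= CODEC_FLAG2_FAST;

    /* after the merge */
    c->flags  |= AV_CODEC_FLAG_GLOBAL_HEADER;
    c->flags2 |= AV_CODEC_FLAG2_FAST;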
@@ -562,7 +562,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
 }
 if(codec->capabilities&CODEC_CAP_TRUNCATED)
-c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
+c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
 /* For some codecs, such as msmpeg4 and mpeg4, width and height
    MUST be initialized there because this information is not
@@ -172,7 +172,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
 /* Some formats want stream headers to be separate. */
 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
-c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }
 /**************************************************************/
@@ -101,7 +101,7 @@ int main(int argc, char **argv)
 }
 out_stream->codec->codec_tag = 0;
 if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
-out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
+out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }
 av_dump_format(ofmt_ctx, 0, out_filename, 1);
@@ -192,7 +192,7 @@ static int open_output_file(const char *filename,
 * Mark the encoder so that it behaves accordingly.
 */
 if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
-(*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;
+(*output_codec_context)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 /** Open the encoder for the audio stream to use it later. */
 if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
@@ -161,7 +161,7 @@ static int open_output_file(const char *filename)
 }
 if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
-enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }
 av_dump_format(ofmt_ctx, 0, filename, 1);
@@ -1119,7 +1119,7 @@ static void do_video_out(AVFormatContext *s,
 int got_packet, forced_keyframe = 0;
 double pts_time;
-if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
+if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
     ost->top_field_first >= 0)
     in_picture->top_field_first = !!ost->top_field_first;
@@ -1266,7 +1266,7 @@ static void do_video_stats(OutputStream *ost, int frame_size)
 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
         ost->quality / (float)FF_QP2LAMBDA);
-if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
+if (enc->coded_frame && (enc->flags & AV_CODEC_FLAG_PSNR))
 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 fprintf(vstats_file,"f_size= %6d ", frame_size);
@@ -1416,8 +1416,8 @@ static void print_final_stats(int64_t total_size)
 }
 extra_size += ost->enc_ctx->extradata_size;
 data_size += ost->data_size;
-if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
-    != CODEC_FLAG_PASS1)
+if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
+    != AV_CODEC_FLAG_PASS1)
 pass1_used = 0;
 }
@@ -1586,7 +1586,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
 }
-if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
+if ((enc->flags & AV_CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
 int j;
 double error, error_sum = 0;
 double scale, scale_sum = 0;
@@ -2734,7 +2734,7 @@ static void set_encoder_id(OutputFile *of, OutputStream *ost)
 if (!encoder_string)
 exit_program(1);
-if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
+if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
 else
 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
@@ -732,7 +732,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
 ist->resample_height,
 ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
 tb.num, tb.den, sar.num, sar.den,
-SWS_BILINEAR + ((ist->dec_ctx->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
+SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
 if (fr.num && fr.den)
 av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
 snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
@@ -1220,7 +1220,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
 MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
 if (qscale >= 0) {
-ost->enc_ctx->flags |= CODEC_FLAG_QSCALE;
+ost->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
 ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
 }
@@ -1228,7 +1228,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
 ost->disposition = av_strdup(ost->disposition);
 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
-ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 av_opt_get_int(o->g->sws_opts, "sws_flags", 0, &ost->sws_flags);
@@ -1450,17 +1450,17 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
 video_enc->rc_override_count = i;
 if (do_psnr)
-video_enc->flags|= CODEC_FLAG_PSNR;
+video_enc->flags|= AV_CODEC_FLAG_PSNR;
 /* two pass mode */
 MATCH_PER_STREAM_OPT(pass, i, do_pass, oc, st);
 if (do_pass) {
 if (do_pass & 1) {
-video_enc->flags |= CODEC_FLAG_PASS1;
+video_enc->flags |= AV_CODEC_FLAG_PASS1;
 av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND);
 }
 if (do_pass & 2) {
-video_enc->flags |= CODEC_FLAG_PASS2;
+video_enc->flags |= AV_CODEC_FLAG_PASS2;
 av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND);
 }
 }
@@ -1481,7 +1481,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
 if (!strcmp(ost->enc->name, "libx264")) {
 av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
 } else {
-if (video_enc->flags & CODEC_FLAG_PASS2) {
+if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
 char *logbuffer = read_file(logfilename);
 if (!logbuffer) {
@@ -1491,7 +1491,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
 }
 video_enc->stats_in = logbuffer;
 }
-if (video_enc->flags & CODEC_FLAG_PASS1) {
+if (video_enc->flags & AV_CODEC_FLAG_PASS1) {
 f = av_fopen_utf8(logfilename, "wb");
 if (!f) {
 av_log(NULL, AV_LOG_FATAL,
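The two-pass handling in the hunks above follows the usual libavcodec pattern: pass one emits rate-control statistics through stats_out, pass two feeds them back through stats_in. A condensed sketch of that flow (logfilename and the file handling are illustrative, mirroring ffmpeg_opt.c):

    /* pass 1: collect statistics while encoding */
    video_enc->flags |= AV_CODEC_FLAG_PASS1;
    /* ... after encoding, append video_enc->stats_out to logfilename ... */

    /* pass 2: re-encode using the collected statistics */
    video_enc->flags |= AV_CODEC_FLAG_PASS2;
    video_enc->stats_in = read_file(logfilename); /* whole first-pass log */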
@@ -2696,7 +2696,8 @@ static int stream_component_open(VideoState *is, int stream_index)
 av_codec_set_lowres(avctx, stream_lowres);
 if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
-if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
+if (fast)
+    avctx->flags2 |= AV_CODEC_FLAG2_FAST;
 if(codec->capabilities & CODEC_CAP_DR1)
 avctx->flags |= CODEC_FLAG_EMU_EDGE;
@@ -559,7 +559,7 @@ static inline void idct_put(FourXContext *f, int x, int y) | |||||
idct(block[i]); | idct(block[i]); | ||||
} | } | ||||
if (!(f->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(f->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
for (i = 4; i < 6; i++) | for (i = 4; i < 6; i++) | ||||
idct(block[i]); | idct(block[i]); | ||||
} | } | ||||
@@ -1129,9 +1129,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx) | |||||
AAC_RENAME(ff_aac_sbr_init)(); | AAC_RENAME(ff_aac_sbr_init)(); | ||||
#if USE_FIXED | #if USE_FIXED | ||||
ac->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
ac->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
#else | #else | ||||
ac->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
ac->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
#endif /* USE_FIXED */ | #endif /* USE_FIXED */ | ||||
if (!ac->fdsp) { | if (!ac->fdsp) { | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
@@ -659,7 +659,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, | |||||
init_put_bits(&s->pb, avpkt->data, avpkt->size); | init_put_bits(&s->pb, avpkt->data, avpkt->size); | ||||
if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & CODEC_FLAG_BITEXACT)) | |||||
if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) | |||||
put_bitstream_info(s, LIBAVCODEC_IDENT); | put_bitstream_info(s, LIBAVCODEC_IDENT); | ||||
start_ch = 0; | start_ch = 0; | ||||
memset(chan_el_counter, 0, sizeof(chan_el_counter)); | memset(chan_el_counter, 0, sizeof(chan_el_counter)); | ||||
@@ -757,7 +757,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, | |||||
avctx->frame_bits = put_bits_count(&s->pb); | avctx->frame_bits = put_bits_count(&s->pb); | ||||
// rate control stuff | // rate control stuff | ||||
if (!(avctx->flags & CODEC_FLAG_QSCALE)) { | |||||
if (!(avctx->flags & AV_CODEC_FLAG_QSCALE)) { | |||||
float ratio = avctx->bit_rate * 1024.0f / avctx->sample_rate / avctx->frame_bits; | float ratio = avctx->bit_rate * 1024.0f / avctx->sample_rate / avctx->frame_bits; | ||||
s->lambda *= ratio; | s->lambda *= ratio; | ||||
s->lambda = FFMIN(s->lambda, 65536.f); | s->lambda = FFMIN(s->lambda, 65536.f); | ||||
@@ -794,7 +794,7 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s) | |||||
{ | { | ||||
int ret = 0; | int ret = 0; | ||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
if (!s->fdsp) | if (!s->fdsp) | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
@@ -262,7 +262,7 @@ static av_cold void lame_window_init(AacPsyContext *ctx, AVCodecContext *avctx) | |||||
for (i = 0; i < avctx->channels; i++) { | for (i = 0; i < avctx->channels; i++) { | ||||
AacPsyChannel *pch = &ctx->ch[i]; | AacPsyChannel *pch = &ctx->ch[i]; | ||||
if (avctx->flags & CODEC_FLAG_QSCALE) | |||||
if (avctx->flags & AV_CODEC_FLAG_QSCALE) | |||||
pch->attack_threshold = psy_vbr_map[avctx->global_quality / FF_QP2LAMBDA].st_lrm; | pch->attack_threshold = psy_vbr_map[avctx->global_quality / FF_QP2LAMBDA].st_lrm; | ||||
else | else | ||||
pch->attack_threshold = lame_calc_attack_threshold(avctx->bit_rate / avctx->channels / 1000); | pch->attack_threshold = lame_calc_attack_threshold(avctx->bit_rate / avctx->channels / 1000); | ||||
@@ -193,13 +193,13 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx) | |||||
ff_bswapdsp_init(&s->bdsp); | ff_bswapdsp_init(&s->bdsp); | ||||
#if (USE_FIXED) | #if (USE_FIXED) | ||||
s->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
#else | #else | ||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
ff_fmt_convert_init(&s->fmt_conv, avctx); | ff_fmt_convert_init(&s->fmt_conv, avctx); | ||||
#endif | #endif | ||||
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT); | |||||
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
av_lfg_init(&s->dith_state, 0); | av_lfg_init(&s->dith_state, 0); | ||||
if (USE_FIXED) | if (USE_FIXED) | ||||
@@ -2484,7 +2484,7 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx) | |||||
ff_audiodsp_init(&s->adsp); | ff_audiodsp_init(&s->adsp); | ||||
ff_me_cmp_init(&s->mecc, avctx); | ff_me_cmp_init(&s->mecc, avctx); | ||||
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT); | |||||
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
dprint_options(s); | dprint_options(s); | ||||
@@ -139,7 +139,7 @@ static CoefType calc_cpl_coord(CoefSumType energy_ch, CoefSumType energy_cpl) | |||||
av_cold int ff_ac3_float_encode_init(AVCodecContext *avctx) | av_cold int ff_ac3_float_encode_init(AVCodecContext *avctx) | ||||
{ | { | ||||
AC3EncodeContext *s = avctx->priv_data; | AC3EncodeContext *s = avctx->priv_data; | ||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
if (!s->fdsp) | if (!s->fdsp) | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
return ff_ac3_encode_init(avctx); | return ff_ac3_encode_init(avctx); | ||||
@@ -64,7 +64,7 @@ av_cold void ff_idctdsp_init_arm(IDCTDSPContext *c, AVCodecContext *avctx, | |||||
int cpu_flags = av_get_cpu_flags(); | int cpu_flags = av_get_cpu_flags(); | ||||
if (!avctx->lowres && !high_bit_depth) { | if (!avctx->lowres && !high_bit_depth) { | ||||
if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) || | |||||
if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) || | |||||
avctx->idct_algo == FF_IDCT_ARM) { | avctx->idct_algo == FF_IDCT_ARM) { | ||||
c->idct_put = j_rev_dct_arm_put; | c->idct_put = j_rev_dct_arm_put; | ||||
c->idct_add = j_rev_dct_arm_add; | c->idct_add = j_rev_dct_arm_add; | ||||
@@ -33,7 +33,7 @@ av_cold void ff_idctdsp_init_armv6(IDCTDSPContext *c, AVCodecContext *avctx, | |||||
unsigned high_bit_depth) | unsigned high_bit_depth) | ||||
{ | { | ||||
if (!avctx->lowres && !high_bit_depth) { | if (!avctx->lowres && !high_bit_depth) { | ||||
if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) || | |||||
if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) || | |||||
avctx->idct_algo == FF_IDCT_SIMPLEARMV6) { | avctx->idct_algo == FF_IDCT_SIMPLEARMV6) { | ||||
c->idct_put = ff_simple_idct_put_armv6; | c->idct_put = ff_simple_idct_put_armv6; | ||||
c->idct_add = ff_simple_idct_add_armv6; | c->idct_add = ff_simple_idct_add_armv6; | ||||
@@ -66,7 +66,7 @@ int ff_ass_subtitle_header(AVCodecContext *avctx, | |||||
"\r\n" | "\r\n" | ||||
"[Events]\r\n" | "[Events]\r\n" | ||||
"Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r\n", | "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r\n", | ||||
!(avctx->flags & CODEC_FLAG_BITEXACT) ? AV_STRINGIFY(LIBAVCODEC_VERSION) : "", | |||||
!(avctx->flags & AV_CODEC_FLAG_BITEXACT) ? AV_STRINGIFY(LIBAVCODEC_VERSION) : "", | |||||
ASS_DEFAULT_PLAYRESX, ASS_DEFAULT_PLAYRESY, | ASS_DEFAULT_PLAYRESX, ASS_DEFAULT_PLAYRESY, | ||||
font, font_size, color, color, back_color, back_color, | font, font_size, color, color, back_color, back_color, | ||||
-bold, -italic, -underline, alignment); | -bold, -italic, -underline, alignment); | ||||
@@ -195,7 +195,7 @@ static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y) | |||||
a->idsp.idct_put(dest_y + 8 * linesize, linesize, block[2]); | a->idsp.idct_put(dest_y + 8 * linesize, linesize, block[2]); | ||||
a->idsp.idct_put(dest_y + 8 * linesize + 8, linesize, block[3]); | a->idsp.idct_put(dest_y + 8 * linesize + 8, linesize, block[3]); | ||||
if (!(a->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(a->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
a->idsp.idct_put(dest_cb, frame->linesize[1], block[4]); | a->idsp.idct_put(dest_cb, frame->linesize[1], block[4]); | ||||
a->idsp.idct_put(dest_cr, frame->linesize[2], block[5]); | a->idsp.idct_put(dest_cr, frame->linesize[2], block[5]); | ||||
} | } | ||||
@@ -207,7 +207,7 @@ static inline void dct_get(ASV1Context *a, const AVFrame *frame, | |||||
for (i = 0; i < 4; i++) | for (i = 0; i < 4; i++) | ||||
a->fdsp.fdct(block[i]); | a->fdsp.fdct(block[i]); | ||||
if (!(a->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(a->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
a->pdsp.get_pixels(block[4], ptr_cb, frame->linesize[1]); | a->pdsp.get_pixels(block[4], ptr_cb, frame->linesize[1]); | ||||
a->pdsp.get_pixels(block[5], ptr_cr, frame->linesize[2]); | a->pdsp.get_pixels(block[5], ptr_cr, frame->linesize[2]); | ||||
for (i = 4; i < 6; i++) | for (i = 4; i < 6; i++) | ||||
@@ -361,7 +361,7 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx) | |||||
ff_atrac_generate_tables(); | ff_atrac_generate_tables(); | ||||
q->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
q->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
q->bands[0] = q->low; | q->bands[0] = q->low; | ||||
q->bands[1] = q->mid; | q->bands[1] = q->mid; | ||||
@@ -914,7 +914,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) | |||||
} | } | ||||
ff_atrac_init_gain_compensation(&q->gainc_ctx, 4, 3); | ff_atrac_init_gain_compensation(&q->gainc_ctx, 4, 3); | ||||
q->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
q->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
q->units = av_mallocz_array(avctx->channels, sizeof(*q->units)); | q->units = av_mallocz_array(avctx->channels, sizeof(*q->units)); | ||||
if (!q->units || !q->fdsp) { | if (!q->units || !q->fdsp) { | ||||
@@ -171,7 +171,7 @@ static av_cold int atrac3p_decode_init(AVCodecContext *avctx) | |||||
ctx->my_channel_layout = avctx->channel_layout; | ctx->my_channel_layout = avctx->channel_layout; | ||||
ctx->ch_units = av_mallocz_array(ctx->num_channel_blocks, sizeof(*ctx->ch_units)); | ctx->ch_units = av_mallocz_array(ctx->num_channel_blocks, sizeof(*ctx->ch_units)); | ||||
ctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
ctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
if (!ctx->ch_units || !ctx->fdsp) { | if (!ctx->ch_units || !ctx->fdsp) { | ||||
atrac3p_decode_close(avctx); | atrac3p_decode_close(avctx); | ||||
@@ -712,6 +712,120 @@ typedef struct RcOverride{
    Note: Not everything is supported yet.
 */
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define AV_CODEC_FLAG_UNALIGNED (1 << 0)
+/**
+ * Use fixed qscale.
+ */
+#define AV_CODEC_FLAG_QSCALE (1 << 1)
+/**
+ * 4 MV per MB allowed / advanced prediction for H.263.
+ */
+#define AV_CODEC_FLAG_4MV (1 << 2)
+/**
+ * Output even those frames that might be corrupted.
+ */
+#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3)
+/**
+ * Use qpel MC.
+ */
+#define AV_CODEC_FLAG_QPEL (1 << 4)
+/**
+ * Use internal 2pass ratecontrol in first pass mode.
+ */
+#define AV_CODEC_FLAG_PASS1 (1 << 9)
+/**
+ * Use internal 2pass ratecontrol in second pass mode.
+ */
+#define AV_CODEC_FLAG_PASS2 (1 << 10)
+/**
+ * loop filter.
+ */
+#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
+/**
+ * Only decode/encode grayscale.
+ */
+#define AV_CODEC_FLAG_GRAY (1 << 13)
+/**
+ * error[?] variables will be set during encoding.
+ */
+#define AV_CODEC_FLAG_PSNR (1 << 15)
+/**
+ * Input bitstream might be truncated at a random location
+ * instead of only at frame boundaries.
+ */
+#define AV_CODEC_FLAG_TRUNCATED (1 << 16)
+/**
+ * Use interlaced DCT.
+ */
+#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
+/**
+ * Force low delay.
+ */
+#define AV_CODEC_FLAG_LOW_DELAY (1 << 19)
+/**
+ * Place global headers in extradata instead of every keyframe.
+ */
+#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
+/**
+ * Use only bitexact stuff (except (I)DCT).
+ */
+#define AV_CODEC_FLAG_BITEXACT (1 << 23)
+/* Fx : Flag for h263+ extra options */
+/**
+ * H.263 advanced intra coding / MPEG-4 AC prediction
+ */
+#define AV_CODEC_FLAG_AC_PRED (1 << 24)
+/**
+ * interlaced motion estimation
+ */
+#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29)
+#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31)
+
+/**
+ * Allow non spec compliant speedup tricks.
+ */
+#define AV_CODEC_FLAG2_FAST (1 << 0)
+/**
+ * Skip bitstream encoding.
+ */
+#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2)
+/**
+ * Place global headers at every keyframe instead of in extradata.
+ */
+#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
+/**
+ * timecode is in drop frame format. DEPRECATED!!!!
+ */
+#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000
+/**
+ * Input bitstream might be truncated at a packet boundaries
+ * instead of only at frame boundaries.
+ */
+#define AV_CODEC_FLAG2_CHUNKS (1 << 15)
+/**
+ * Discard cropping information from SPS.
+ */
+#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
+/**
+ * Show all frames before the first keyframe
+ */
+#define AV_CODEC_FLAG2_SHOW_ALL 0x00400000
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_FLAG2_EXPORT_MVS 0x10000000
+/**
+ * Do not skip samples and export skip information as frame side data
+ */
+#define AV_CODEC_FLAG2_SKIP_MANUAL 0x20000000
 /**
  * Allow decoders to produce frames with data planes that are not aligned
  * to CPU requirements (e.g. due to cropping).
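Apart from the prefix, the new constants are used exactly like the old ones. A short sketch of the patterns this merge touches most often (enc and oc are illustrative names for an encoder context and an output AVFormatContext):

    /* put global headers into extradata when the muxer requires it */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    /* constant-quantizer (fixed qscale) encoding */
    enc->flags |= AV_CODEC_FLAG_QSCALE;
    enc->global_quality = FF_QP2LAMBDA * qscale;

    /* reproducible, bit-exact output (also drops the encoder ident string) */
    enc->flags |= AV_CODEC_FLAG_BITEXACT;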
@@ -1357,14 +1471,14 @@ typedef struct AVCodecContext {
 #define FF_COMPRESSION_DEFAULT -1
 /**
- * CODEC_FLAG_*.
+ * AV_CODEC_FLAG_*.
  * - encoding: Set by user.
  * - decoding: Set by user.
  */
 int flags;
 /**
- * CODEC_FLAG2_*
+ * AV_CODEC_FLAG2_*
  * - encoding: Set by user.
  * - decoding: Set by user.
  */
@@ -2702,7 +2816,7 @@ typedef struct AVCodecContext {
 /**
  * error
- * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
+ * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR.
  * - decoding: unused
  */
 uint64_t error[AV_NUM_DATA_POINTERS];
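When AV_CODEC_FLAG_PSNR is set, the encoder fills error[] with the accumulated squared error per plane; do_video_stats() in ffmpeg.c (see the hunk above) turns the luma entry into a PSNR figure, roughly as follows (a sketch assuming 8-bit video; psnr(x) is ffmpeg.c's -10 * log10(x) helper):

    if (enc->coded_frame && (enc->flags & AV_CODEC_FLAG_PSNR)) {
        double mse = enc->coded_frame->error[0] /
                     (enc->width * enc->height * 255.0 * 255.0);
        fprintf(vstats_file, "PSNR= %6.2f ", psnr(mse));
    }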
@@ -2000,7 +2000,7 @@ static av_cold int dca_decode_init(AVCodecContext *avctx) | |||||
s->avctx = avctx; | s->avctx = avctx; | ||||
dca_init_vlcs(); | dca_init_vlcs(); | ||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
if (!s->fdsp) | if (!s->fdsp) | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
@@ -1119,7 +1119,7 @@ int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth) | |||||
for (i = 0; i < FF_ARRAY_ELEMS(ff_dnxhd_cid_table); i++) { | for (i = 0; i < FF_ARRAY_ELEMS(ff_dnxhd_cid_table); i++) { | ||||
const CIDEntry *cid = &ff_dnxhd_cid_table[i]; | const CIDEntry *cid = &ff_dnxhd_cid_table[i]; | ||||
if (cid->width == avctx->width && cid->height == avctx->height && | if (cid->width == avctx->width && cid->height == avctx->height && | ||||
cid->interlaced == !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT) && | |||||
cid->interlaced == !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) && | |||||
cid->bit_depth == bit_depth) { | cid->bit_depth == bit_depth) { | ||||
for (j = 0; j < FF_ARRAY_ELEMS(cid->bit_rates); j++) { | for (j = 0; j < FF_ARRAY_ELEMS(cid->bit_rates); j++) { | ||||
if (cid->bit_rates[j] == mbs) | if (cid->bit_rates[j] == mbs) | ||||
@@ -395,7 +395,7 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame, | |||||
ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, ctx->blocks[4]); | ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, ctx->blocks[4]); | ||||
ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, ctx->blocks[5]); | ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, ctx->blocks[5]); | ||||
if (!(ctx->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
dct_y_offset = dct_linesize_chroma << 3; | dct_y_offset = dct_linesize_chroma << 3; | ||||
ctx->idsp.idct_put(dest_u, dct_linesize_chroma, ctx->blocks[2]); | ctx->idsp.idct_put(dest_u, dct_linesize_chroma, ctx->blocks[2]); | ||||
ctx->idsp.idct_put(dest_v, dct_linesize_chroma, ctx->blocks[3]); | ctx->idsp.idct_put(dest_v, dct_linesize_chroma, ctx->blocks[3]); | ||||
@@ -408,7 +408,7 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame, | |||||
ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, ctx->blocks[6]); | ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, ctx->blocks[6]); | ||||
ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, ctx->blocks[7]); | ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, ctx->blocks[7]); | ||||
if (!(ctx->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
dct_y_offset = dct_linesize_chroma << 3; | dct_y_offset = dct_linesize_chroma << 3; | ||||
ctx->idsp.idct_put(dest_u, dct_linesize_chroma, ctx->blocks[2]); | ctx->idsp.idct_put(dest_u, dct_linesize_chroma, ctx->blocks[2]); | ||||
ctx->idsp.idct_put(dest_u + dct_x_offset, dct_linesize_chroma, ctx->blocks[3]); | ctx->idsp.idct_put(dest_u + dct_x_offset, dct_linesize_chroma, ctx->blocks[3]); | ||||
@@ -351,7 +351,7 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx) | |||||
ctx->m.mb_height = (avctx->height + 15) / 16; | ctx->m.mb_height = (avctx->height + 15) / 16; | ||||
ctx->m.mb_width = (avctx->width + 15) / 16; | ctx->m.mb_width = (avctx->width + 15) / 16; | ||||
if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) { | |||||
if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) { | |||||
ctx->interlaced = 1; | ctx->interlaced = 1; | ||||
ctx->m.mb_height /= 2; | ctx->m.mb_height /= 2; | ||||
} | } | ||||
@@ -207,7 +207,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, | |||||
memcpy (buf + 8, "V1.0", 4); | memcpy (buf + 8, "V1.0", 4); | ||||
write32(buf + 20, 1); /* new image */ | write32(buf + 20, 1); /* new image */ | ||||
write32(buf + 24, HEADER_SIZE); | write32(buf + 24, HEADER_SIZE); | ||||
if (!(avctx->flags & CODEC_FLAG_BITEXACT)) | |||||
if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) | |||||
memcpy (buf + 160, LIBAVCODEC_IDENT, FFMIN(sizeof(LIBAVCODEC_IDENT), 100)); | memcpy (buf + 160, LIBAVCODEC_IDENT, FFMIN(sizeof(LIBAVCODEC_IDENT), 100)); | ||||
write32(buf + 660, 0xFFFFFFFF); /* unencrypted */ | write32(buf + 660, 0xFFFFFFFF); /* unencrypted */ | ||||
@@ -30,7 +30,7 @@ static int dump_extradata(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, | |||||
int cmd= args ? *args : 0; | int cmd= args ? *args : 0; | ||||
/* cast to avoid warning about discarding qualifiers */ | /* cast to avoid warning about discarding qualifiers */ | ||||
if(avctx->extradata){ | if(avctx->extradata){ | ||||
if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER) && cmd=='a') | |||||
if( (keyframe && (avctx->flags2 & AV_CODEC_FLAG2_LOCAL_HEADER) && cmd == 'a') | |||||
||(keyframe && (cmd=='k' || !cmd)) | ||(keyframe && (cmd=='k' || !cmd)) | ||||
||(cmd=='e') | ||(cmd=='e') | ||||
/*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){ | /*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){ | ||||
@@ -204,7 +204,7 @@ static av_always_inline PutBitContext *dv_encode_ac(EncBlockInfo *bi, | |||||
static av_always_inline int dv_guess_dct_mode(DVVideoContext *s, uint8_t *data, | static av_always_inline int dv_guess_dct_mode(DVVideoContext *s, uint8_t *data, | ||||
int linesize) | int linesize) | ||||
{ | { | ||||
if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) { | |||||
int ps = s->ildct_cmp(NULL, data, NULL, linesize, 8) - 400; | int ps = s->ildct_cmp(NULL, data, NULL, linesize, 8) - 400; | ||||
if (ps > 0) { | if (ps > 0) { | ||||
int is = s->ildct_cmp(NULL, data, NULL, linesize << 1, 4) + | int is = s->ildct_cmp(NULL, data, NULL, linesize << 1, 4) + | ||||
@@ -284,7 +284,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac | |||||
case 5: | case 5: | ||||
if (!tmpptr) { | if (!tmpptr) { | ||||
av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n"); | av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n"); | ||||
if (!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL)) | |||||
if (!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
frame->key_frame = 0; | frame->key_frame = 0; | ||||
@@ -101,7 +101,7 @@ static inline void comp_block(MadContext *t, AVFrame *frame, | |||||
frame->linesize[0], | frame->linesize[0], | ||||
t->last_frame->data[0] + offset, | t->last_frame->data[0] + offset, | ||||
t->last_frame->linesize[0], add); | t->last_frame->linesize[0], add); | ||||
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
} else if (!(t->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
int index = j - 3; | int index = j - 3; | ||||
unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame->linesize[index] + mb_x * 8 + (mv_x/2); | unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame->linesize[index] + mb_x * 8 + (mv_x/2); | ||||
if (offset >= (t->avctx->height/2 - 7) * t->last_frame->linesize[index] - 7) | if (offset >= (t->avctx->height/2 - 7) * t->last_frame->linesize[index] - 7) | ||||
@@ -120,7 +120,7 @@ static inline void idct_put(MadContext *t, AVFrame *frame, int16_t *block, | |||||
ff_ea_idct_put_c( | ff_ea_idct_put_c( | ||||
frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3), | frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3), | ||||
frame->linesize[0], block); | frame->linesize[0], block); | ||||
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
} else if (!(t->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
int index = j - 3; | int index = j - 3; | ||||
ff_ea_idct_put_c( | ff_ea_idct_put_c( | ||||
frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x*8, | frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x*8, | ||||
@@ -116,7 +116,7 @@ static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], AVFrame *frame, | |||||
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]); | ff_ea_idct_put_c(dest_y + 8, linesize, block[1]); | ||||
ff_ea_idct_put_c(dest_y + 8 * linesize , linesize, block[2]); | ff_ea_idct_put_c(dest_y + 8 * linesize , linesize, block[2]); | ||||
ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]); | ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]); | ||||
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]); | ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]); | ||||
ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]); | ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]); | ||||
} | } | ||||
@@ -142,7 +142,7 @@ static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame, | |||||
tgq_dconly(s, dest_y + 8, linesize, dc[1]); | tgq_dconly(s, dest_y + 8, linesize, dc[1]); | ||||
tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]); | tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]); | ||||
tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]); | tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]); | ||||
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
tgq_dconly(s, dest_cb, frame->linesize[1], dc[4]); | tgq_dconly(s, dest_cb, frame->linesize[1], dc[4]); | ||||
tgq_dconly(s, dest_cr, frame->linesize[2], dc[5]); | tgq_dconly(s, dest_cr, frame->linesize[2], dc[5]); | ||||
} | } | ||||
@@ -85,7 +85,7 @@ static inline void tqi_idct_put(TqiContext *t, AVFrame *frame, int16_t (*block)[ | |||||
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]); | ff_ea_idct_put_c(dest_y + 8, linesize, block[1]); | ||||
ff_ea_idct_put_c(dest_y + 8*linesize , linesize, block[2]); | ff_ea_idct_put_c(dest_y + 8*linesize , linesize, block[2]); | ||||
ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]); | ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]); | ||||
if(!(s->avctx->flags&CODEC_FLAG_GRAY)) { | |||||
if(!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]); | ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]); | ||||
ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]); | ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]); | ||||
} | } | ||||
@@ -312,7 +312,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w, | |||||
diff = fold(diff, bits); | diff = fold(diff, bits); | ||||
if (s->ac) { | if (s->ac) { | ||||
if (s->flags & CODEC_FLAG_PASS1) { | |||||
if (s->flags & AV_CODEC_FLAG_PASS1) { | |||||
put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, | put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, | ||||
s->rc_stat2[p->quant_table_index][context]); | s->rc_stat2[p->quant_table_index][context]); | ||||
} else { | } else { | ||||
@@ -672,7 +672,8 @@ static av_cold int encode_init(AVCodecContext *avctx) | |||||
s->version = 0; | s->version = 0; | ||||
if ((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1) | |||||
if ((avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) || | |||||
avctx->slices > 1) | |||||
s->version = FFMAX(s->version, 2); | s->version = FFMAX(s->version, 2); | ||||
// Unspecified level & slices, we choose version 1.2+ to ensure multithreaded decodability | // Unspecified level & slices, we choose version 1.2+ to ensure multithreaded decodability | ||||
@@ -867,7 +868,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift); | avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift); | ||||
s->picture_number = 0; | s->picture_number = 0; | ||||
if (avctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) { | |||||
if (avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) { | |||||
for (i = 0; i < s->quant_table_count; i++) { | for (i = 0; i < s->quant_table_count; i++) { | ||||
s->rc_stat2[i] = av_mallocz(s->context_count[i] * | s->rc_stat2[i] = av_mallocz(s->context_count[i] * | ||||
sizeof(*s->rc_stat2[i])); | sizeof(*s->rc_stat2[i])); | ||||
@@ -980,7 +981,7 @@ slices_ok: | |||||
return ret; | return ret; | ||||
#define STATS_OUT_SIZE 1024 * 1024 * 6 | #define STATS_OUT_SIZE 1024 * 1024 * 6 | ||||
if (avctx->flags & CODEC_FLAG_PASS1) { | |||||
if (avctx->flags & AV_CODEC_FLAG_PASS1) { | |||||
avctx->stats_out = av_mallocz(STATS_OUT_SIZE); | avctx->stats_out = av_mallocz(STATS_OUT_SIZE); | ||||
if (!avctx->stats_out) | if (!avctx->stats_out) | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
@@ -1200,7 +1201,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, | |||||
+ avctx->width*avctx->height*35LL*4; | + avctx->width*avctx->height*35LL*4; | ||||
if(!pict) { | if(!pict) { | ||||
if (avctx->flags & CODEC_FLAG_PASS1) { | |||||
if (avctx->flags & AV_CODEC_FLAG_PASS1) { | |||||
int j, k, m; | int j, k, m; | ||||
char *p = avctx->stats_out; | char *p = avctx->stats_out; | ||||
char *end = p + STATS_OUT_SIZE; | char *end = p + STATS_OUT_SIZE; | ||||
@@ -1315,7 +1316,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, | |||||
buf_p += bytes; | buf_p += bytes; | ||||
} | } | ||||
if (avctx->flags & CODEC_FLAG_PASS1) | |||||
if (avctx->flags & AV_CODEC_FLAG_PASS1) | |||||
avctx->stats_out[0] = '\0'; | avctx->stats_out[0] = '\0'; | ||||
#if FF_API_CODED_FRAME | #if FF_API_CODED_FRAME | ||||
@@ -127,7 +127,7 @@ int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options){ | |||||
if( !avctx->thread_count | if( !avctx->thread_count | ||||
&& avctx->codec_id == AV_CODEC_ID_MJPEG | && avctx->codec_id == AV_CODEC_ID_MJPEG | ||||
&& !(avctx->flags & CODEC_FLAG_QSCALE)) { | |||||
&& !(avctx->flags & AV_CODEC_FLAG_QSCALE)) { | |||||
av_log(avctx, AV_LOG_DEBUG, | av_log(avctx, AV_LOG_DEBUG, | ||||
"Forcing thread count to 1 for MJPEG encoding, use -thread_type slice " | "Forcing thread count to 1 for MJPEG encoding, use -thread_type slice " | ||||
"or a constant quantizer if you want to use multiple cpu cores\n"); | "or a constant quantizer if you want to use multiple cpu cores\n"); | ||||
@@ -135,7 +135,7 @@ int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options){ | |||||
} | } | ||||
if( avctx->thread_count > 1 | if( avctx->thread_count > 1 | ||||
&& avctx->codec_id == AV_CODEC_ID_MJPEG | && avctx->codec_id == AV_CODEC_ID_MJPEG | ||||
&& !(avctx->flags & CODEC_FLAG_QSCALE)) | |||||
&& !(avctx->flags & AV_CODEC_FLAG_QSCALE)) | |||||
av_log(avctx, AV_LOG_WARNING, | av_log(avctx, AV_LOG_WARNING, | ||||
"MJPEG CBR encoding works badly with frame multi-threading, consider " | "MJPEG CBR encoding works badly with frame multi-threading, consider " | ||||
"using -threads 1, -thread_type slice or a constant quantizer.\n"); | "using -threads 1, -thread_type slice or a constant quantizer.\n"); | ||||
@@ -143,7 +143,7 @@ int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options){ | |||||
if (avctx->codec_id == AV_CODEC_ID_HUFFYUV || | if (avctx->codec_id == AV_CODEC_ID_HUFFYUV || | ||||
avctx->codec_id == AV_CODEC_ID_FFVHUFF) { | avctx->codec_id == AV_CODEC_ID_FFVHUFF) { | ||||
int warn = 0; | int warn = 0; | ||||
if (avctx->flags & CODEC_FLAG_PASS1) | |||||
if (avctx->flags & AV_CODEC_FLAG_PASS1) | |||||
warn = 1; | warn = 1; | ||||
else if(avctx->context_model > 0) { | else if(avctx->context_model > 0) { | ||||
AVDictionaryEntry *t = av_dict_get(options, "non_deterministic", | AVDictionaryEntry *t = av_dict_get(options, "non_deterministic", | ||||
@@ -116,7 +116,7 @@ static inline int h263_get_motion_length(int val, int f_code){ | |||||
} | } | ||||
static inline void ff_h263_encode_motion_vector(MpegEncContext * s, int x, int y, int f_code){ | static inline void ff_h263_encode_motion_vector(MpegEncContext * s, int x, int y, int f_code){ | ||||
if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT) { | |||||
if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) { | |||||
skip_put_bits(&s->pb, | skip_put_bits(&s->pb, | ||||
h263_get_motion_length(x, f_code) | h263_get_motion_length(x, f_code) | ||||
+h263_get_motion_length(y, f_code)); | +h263_get_motion_length(y, f_code)); | ||||
@@ -50,7 +50,7 @@ static enum AVPixelFormat h263_get_format(AVCodecContext *avctx) | |||||
if (avctx->codec->id == AV_CODEC_ID_MSS2) | if (avctx->codec->id == AV_CODEC_ID_MSS2) | ||||
return AV_PIX_FMT_YUV420P; | return AV_PIX_FMT_YUV420P; | ||||
if (CONFIG_GRAY && (avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
if (avctx->color_range == AVCOL_RANGE_UNSPECIFIED) | if (avctx->color_range == AVCOL_RANGE_UNSPECIFIED) | ||||
avctx->color_range = AVCOL_RANGE_MPEG; | avctx->color_range = AVCOL_RANGE_MPEG; | ||||
return AV_PIX_FMT_GRAY8; | return AV_PIX_FMT_GRAY8; | ||||
@@ -165,7 +165,7 @@ static int get_consumed_bytes(MpegEncContext *s, int buf_size) | |||||
/* We would have to scan through the whole buf to handle the weird | /* We would have to scan through the whole buf to handle the weird | ||||
* reordering ... */ | * reordering ... */ | ||||
return buf_size; | return buf_size; | ||||
} else if (s->avctx->flags & CODEC_FLAG_TRUNCATED) { | |||||
} else if (s->avctx->flags & AV_CODEC_FLAG_TRUNCATED) { | |||||
pos -= s->parse_context.last_index; | pos -= s->parse_context.last_index; | ||||
// padding is not really read so this might be -1 | // padding is not really read so this might be -1 | ||||
if (pos < 0) | if (pos < 0) | ||||
@@ -430,7 +430,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, | |||||
return 0; | return 0; | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_TRUNCATED) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_TRUNCATED) { | |||||
int next; | int next; | ||||
if (CONFIG_MPEG4_DECODER && s->codec_id == AV_CODEC_ID_MPEG4) { | if (CONFIG_MPEG4_DECODER && s->codec_id == AV_CODEC_ID_MPEG4) { | ||||
@@ -293,7 +293,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, H264SliceContext *sl, | |||||
if(i>=length-1){ //no escaped 0 | if(i>=length-1){ //no escaped 0 | ||||
*dst_length= length; | *dst_length= length; | ||||
*consumed= length+1; //+1 for the header | *consumed= length+1; //+1 for the header | ||||
if(h->avctx->flags2 & CODEC_FLAG2_FAST){ | |||||
if(h->avctx->flags2 & AV_CODEC_FLAG2_FAST){ | |||||
return src; | return src; | ||||
}else{ | }else{ | ||||
memcpy(dst, src, length); | memcpy(dst, src, length); | ||||
@@ -1392,7 +1392,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, | |||||
if(!h->slice_context_count) | if(!h->slice_context_count) | ||||
h->slice_context_count= 1; | h->slice_context_count= 1; | ||||
h->max_contexts = h->slice_context_count; | h->max_contexts = h->slice_context_count; | ||||
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) { | |||||
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) { | |||||
h->current_slice = 0; | h->current_slice = 0; | ||||
if (!h->first_field) | if (!h->first_field) | ||||
h->cur_pic_ptr = NULL; | h->cur_pic_ptr = NULL; | ||||
@@ -1546,8 +1546,8 @@ again: | |||||
// "recovered". | // "recovered". | ||||
if (h->nal_unit_type == NAL_IDR_SLICE) | if (h->nal_unit_type == NAL_IDR_SLICE) | ||||
h->frame_recovered |= FRAME_RECOVERED_IDR; | h->frame_recovered |= FRAME_RECOVERED_IDR; | ||||
h->frame_recovered |= 3*!!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL); | |||||
h->frame_recovered |= 3*!!(avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT); | |||||
h->frame_recovered |= 3*!!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL); | |||||
h->frame_recovered |= 3*!!(avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT); | |||||
#if 1 | #if 1 | ||||
h->cur_pic_ptr->recovered |= h->frame_recovered; | h->cur_pic_ptr->recovered |= h->frame_recovered; | ||||
#else | #else | ||||
@@ -1555,7 +1555,7 @@ again: | |||||
#endif | #endif | ||||
if (h->current_slice == 1) { | if (h->current_slice == 1) { | ||||
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) | |||||
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) | |||||
decode_postinit(h, nal_index >= nals_needed); | decode_postinit(h, nal_index >= nals_needed); | ||||
if (h->avctx->hwaccel && | if (h->avctx->hwaccel && | ||||
@@ -1831,7 +1831,7 @@ static int h264_decode_frame(AVCodecContext *avctx, void *data, | |||||
goto out; | goto out; | ||||
} | } | ||||
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) { | |||||
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) { | |||||
if (avctx->skip_frame >= AVDISCARD_NONREF || | if (avctx->skip_frame >= AVDISCARD_NONREF || | ||||
buf_size >= 4 && !memcmp("Q264", buf, 4)) | buf_size >= 4 && !memcmp("Q264", buf, 4)) | ||||
return buf_size; | return buf_size; | ||||
@@ -1839,9 +1839,9 @@ static int h264_decode_frame(AVCodecContext *avctx, void *data, | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) || | |||||
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) || | |||||
(h->mb_y >= h->mb_height && h->mb_height)) { | (h->mb_y >= h->mb_height && h->mb_height)) { | ||||
if (avctx->flags2 & CODEC_FLAG2_CHUNKS) | |||||
if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) | |||||
decode_postinit(h, 1); | decode_postinit(h, 1); | ||||
ff_h264_field_end(h, &h->slice_ctx[0], 0); | ff_h264_field_end(h, &h->slice_ctx[0], 0); | ||||
@@ -242,7 +242,7 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h, | |||||
unsigned int uvlinesize, | unsigned int uvlinesize, | ||||
int pixel_shift) | int pixel_shift) | ||||
{ | { | ||||
int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY)); | |||||
int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY)); | |||||
int chroma444 = CHROMA444(h); | int chroma444 = CHROMA444(h); | ||||
int chroma422 = CHROMA422(h); | int chroma422 = CHROMA422(h); | ||||
@@ -723,7 +723,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, | |||||
const int mb_type = h->cur_pic.mb_type[mb_xy]; | const int mb_type = h->cur_pic.mb_type[mb_xy]; | ||||
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; | const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; | ||||
int first_vertical_edge_done = 0; | int first_vertical_edge_done = 0; | ||||
int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY)); | |||||
int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY)); | |||||
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); | int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); | ||||
int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset; | int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset; | ||||
int b = 52 + sl->slice_beta_offset - qp_bd_offset; | int b = 52 + sl->slice_beta_offset - qp_bd_offset; | ||||
@@ -250,7 +250,7 @@ static av_always_inline void mc_dir_part(const H264Context *h, H264SliceContext | |||||
if (!square) | if (!square) | ||||
qpix_op[luma_xy](dest_y + delta, src_y + delta, sl->mb_linesize); | qpix_op[luma_xy](dest_y + delta, src_y + delta, sl->mb_linesize); | ||||
if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY) | |||||
if (CONFIG_GRAY && h->flags & AV_CODEC_FLAG_GRAY) | |||||
return; | return; | ||||
if (chroma_idc == 3 /* yuv444 */) { | if (chroma_idc == 3 /* yuv444 */) { | ||||
@@ -425,7 +425,7 @@ static av_always_inline void mc_part_weighted(const H264Context *h, H264SliceCon | |||||
int weight1 = 64 - weight0; | int weight1 = 64 - weight0; | ||||
luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, | luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, | ||||
height, 5, weight0, weight1, 0); | height, 5, weight0, weight1, 0); | ||||
if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) { | |||||
chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, | chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, | ||||
chroma_height, 5, weight0, weight1, 0); | chroma_height, 5, weight0, weight1, 0); | ||||
chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, | chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, | ||||
@@ -438,7 +438,7 @@ static av_always_inline void mc_part_weighted(const H264Context *h, H264SliceCon | |||||
sl->luma_weight[refn1][1][0], | sl->luma_weight[refn1][1][0], | ||||
sl->luma_weight[refn0][0][1] + | sl->luma_weight[refn0][0][1] + | ||||
sl->luma_weight[refn1][1][1]); | sl->luma_weight[refn1][1][1]); | ||||
if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) { | |||||
chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height, | chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height, | ||||
sl->chroma_log2_weight_denom, | sl->chroma_log2_weight_denom, | ||||
sl->chroma_weight[refn0][0][0][0], | sl->chroma_weight[refn0][0][0][0], | ||||
@@ -465,7 +465,7 @@ static av_always_inline void mc_part_weighted(const H264Context *h, H264SliceCon | |||||
sl->luma_log2_weight_denom, | sl->luma_log2_weight_denom, | ||||
sl->luma_weight[refn][list][0], | sl->luma_weight[refn][list][0], | ||||
sl->luma_weight[refn][list][1]); | sl->luma_weight[refn][list][1]); | ||||
if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) { | |||||
if (sl->use_weight_chroma) { | if (sl->use_weight_chroma) { | ||||
chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
sl->chroma_log2_weight_denom,
@@ -566,7 +566,7 @@ static av_always_inline void xchg_mb_border(const H264Context *h, H264SliceConte
XCHG(sl->top_borders[top_idx][sl->mb_x + 1],
src_y + (17 << pixel_shift), 1);
}
-    if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+    if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (chroma444) {
if (deblock_topleft) {
XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
@@ -112,7 +112,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
for (j = 0; j < 16; j++)
tmp_y[j] = get_bits(&gb, bit_depth);
}
-        if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+        if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (!h->sps.chroma_format_idc) {
for (i = 0; i < block_h; i++) {
uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
@@ -137,7 +137,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
} else {
for (i = 0; i < 16; i++)
memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
-        if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+        if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (!h->sps.chroma_format_idc) {
for (i = 0; i < 8; i++) {
memset(dest_cb + i * uvlinesize, 1 << (bit_depth - 1), 8);
@@ -159,7 +159,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
xchg_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
uvlinesize, 1, 0, SIMPLE, PIXEL_SHIFT);
-            if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+            if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
h->hpc.pred8x8[sl->chroma_pred_mode](dest_cb, uvlinesize);
h->hpc.pred8x8[sl->chroma_pred_mode](dest_cr, uvlinesize);
}
@@ -190,7 +190,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
hl_decode_mb_idct_luma(h, sl, mb_type, is_h264, SIMPLE, transform_bypass,
PIXEL_SHIFT, block_offset, linesize, dest_y, 0);
-        if ((SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) &&
+        if ((SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) &&
(sl->cbp & 0x30)) {
uint8_t *dest[2] = { dest_cb, dest_cr };
if (transform_bypass) {
@@ -280,7 +280,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
int i, j, p;
const int *block_offset = &h->block_offset[0];
const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
-    const int plane_count = (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) ? 3 : 1;
+    const int plane_count = (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) ? 3 : 1;
for (p = 0; p < plane_count; p++) {
dest[p] = h->cur_pic.f->data[p] +
@@ -464,7 +464,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
int width = 16 * sps->mb_width;
int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag);
-        if (h->avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) {
+        if (h->avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
av_log(h->avctx, AV_LOG_DEBUG, "discarding sps cropping, original "
"values are l:%d r:%d t:%d b:%d\n",
crop_left, crop_right, crop_top, crop_bottom);
@@ -481,7 +481,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
int step_y = (2 - sps->frame_mbs_only_flag) << vsub;
if (crop_left & (0x1F >> (sps->bit_depth_luma > 8)) &&
-                !(h->avctx->flags & CODEC_FLAG_UNALIGNED)) {
+                !(h->avctx->flags & AV_CODEC_FLAG_UNALIGNED)) {
crop_left &= ~(0x1F >> (sps->bit_depth_luma > 8));
av_log(h->avctx, AV_LOG_WARNING,
"Reducing left cropping to %d "
@@ -246,7 +246,7 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
}
}
-    if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & CODEC_FLAG_GRAY && pic->f->data[2]) {
+    if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
int h_chroma_shift, v_chroma_shift;
av_pix_fmt_get_chroma_sub_sample(pic->f->format,
&h_chroma_shift, &v_chroma_shift);
@@ -685,7 +685,7 @@ static av_always_inline void backup_mb_border(const H264Context *h, H264SliceCon
AV_COPY128(top_border, src_y + 15 * linesize);
if (pixel_shift)
AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
-        if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+        if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (chroma444) {
if (pixel_shift) {
AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
@@ -728,7 +728,7 @@ static av_always_inline void backup_mb_border(const H264Context *h, H264SliceCon
if (pixel_shift)
AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
-    if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+    if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (chroma444) {
if (pixel_shift) {
AV_COPY128(top_border + 32, src_cb + 16 * linesize);
@@ -1282,7 +1282,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
h->chroma_format_idc != h->sps.chroma_format_idc)
needs_reinit = 1;
-    if (h->flags & CODEC_FLAG_LOW_DELAY ||
+    if (h->flags & AV_CODEC_FLAG_LOW_DELAY ||
(h->sps.bitstream_restriction_flag &&
!h->sps.num_reorder_frames)) {
if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
@@ -1829,7 +1829,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
sl->deblocking_filter = 0;
if (sl->deblocking_filter == 1 && h->max_contexts > 1) {
-        if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
+        if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
/* Cheat slightly for speed:
* Do not bother to deblock across slices. */
sl->deblocking_filter = 2;
@@ -2327,7 +2327,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
avctx->codec_id != AV_CODEC_ID_H264 ||
-                     (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY));
+                     (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
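Hedged, illustrative aside (not part of the patch): AV_CODEC_FLAG_GRAY, tested throughout the H.264 hunks above, asks a decoder built with CONFIG_GRAY to decode only the luma plane. Caller-side sketch, with dec_ctx and decoder as assumed names:

    /* Sketch only: grayscale-only decoding; chroma planes are left untouched. */
    dec_ctx->flags |= AV_CODEC_FLAG_GRAY;        /* renamed from CODEC_FLAG_GRAY */
    avcodec_open2(dec_ctx, decoder, NULL);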
@@ -585,7 +585,7 @@ static void decode_vui(GetBitContext *gb, AVCodecContext *avctx,
vui->def_disp_win.bottom_offset = get_ue_golomb_long(gb) * 2;
if (apply_defdispwin &&
-            avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) {
+            avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
av_log(avctx, AV_LOG_DEBUG,
"discarding vui default display window, "
"original values are l:%u r:%u t:%u b:%u\n",
@@ -853,7 +853,7 @@ int ff_hevc_parse_sps(HEVCSPS *sps, GetBitContext *gb, unsigned int *sps_id,
sps->pic_conf_win.top_offset    = get_ue_golomb_long(gb) * 2;
sps->pic_conf_win.bottom_offset = get_ue_golomb_long(gb) * 2;
-        if (avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) {
+        if (avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
av_log(avctx, AV_LOG_DEBUG,
"discarding sps conformance window, "
"original values are l:%u r:%u t:%u b:%u\n",
@@ -1057,7 +1057,7 @@ int ff_hevc_parse_sps(HEVCSPS *sps, GetBitContext *gb, unsigned int *sps_id,
sps->output_window.bottom_offset += sps->vui.def_disp_win.bottom_offset;
}
if (sps->output_window.left_offset & (0x1F >> (sps->pixel_shift)) &&
-        !(avctx->flags & CODEC_FLAG_UNALIGNED)) {
+        !(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) {
sps->output_window.left_offset &= ~(0x1F >> (sps->pixel_shift));
av_log(avctx, AV_LOG_WARNING, "Reducing left output window to %d "
"chroma samples to preserve alignment.\n",
@@ -1038,7 +1038,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
decode_422_bitstream(s, width - 2);
lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + 2, s->temp[0],
width - 2, lefty);
-            if (!(s->flags & CODEC_FLAG_GRAY)) {
+            if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
}
@@ -1071,14 +1071,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
decode_422_bitstream(s, width);
lefty = s->hdsp.add_hfyu_left_pred(ydst, s->temp[0],
width, lefty);
-                if (!(s->flags & CODEC_FLAG_GRAY)) {
+                if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
leftu = s->hdsp.add_hfyu_left_pred(udst, s->temp[1], width2, leftu);
leftv = s->hdsp.add_hfyu_left_pred(vdst, s->temp[2], width2, leftv);
}
if (s->predictor == PLANE) {
if (cy > s->interlaced) {
s->hdsp.add_bytes(ydst, ydst - fake_ystride, width);
-                        if (!(s->flags & CODEC_FLAG_GRAY)) {
+                        if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
s->hdsp.add_bytes(udst, udst - fake_ustride, width2);
s->hdsp.add_bytes(vdst, vdst - fake_vstride, width2);
}
@@ -1093,7 +1093,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
decode_422_bitstream(s, width - 2);
lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + 2, s->temp[0],
width - 2, lefty);
-            if (!(s->flags & CODEC_FLAG_GRAY)) {
+            if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
}
@@ -1105,7 +1105,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
decode_422_bitstream(s, width);
lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + p->linesize[0],
s->temp[0], width, lefty);
-            if (!(s->flags & CODEC_FLAG_GRAY)) {
+            if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
}
@@ -1117,7 +1117,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
decode_422_bitstream(s, 4);
lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + fake_ystride,
s->temp[0], 4, lefty);
-            if (!(s->flags & CODEC_FLAG_GRAY)) {
+            if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
}
@@ -1128,7 +1128,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
s->hdsp.add_hfyu_median_pred(p->data[0] + fake_ystride + 4,
p->data[0] + 4, s->temp[0],
width - 4, &lefty, &lefttopy);
-            if (!(s->flags & CODEC_FLAG_GRAY)) {
+            if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
lefttopu = p->data[1][1];
lefttopv = p->data[2][1];
s->hdsp.add_hfyu_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
@@ -1163,7 +1163,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
s->hdsp.add_hfyu_median_pred(ydst, ydst - fake_ystride,
s->temp[0], width,
&lefty, &lefttopy);
-                if (!(s->flags & CODEC_FLAG_GRAY)) {
+                if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
s->hdsp.add_hfyu_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
s->hdsp.add_hfyu_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
}
@@ -220,7 +220,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
ff_huffyuvencdsp_init(&s->hencdsp);
avctx->extradata = av_mallocz(3*MAX_N + 4);
-    if (s->flags&CODEC_FLAG_PASS1) {
+    if (s->flags&AV_CODEC_FLAG_PASS1) {
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
if (!avctx->stats_out)
@@ -314,10 +314,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
avctx->bits_per_coded_sample = s->bitstream_bpp;
s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
s->predictor = avctx->prediction_method;
-    s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
+    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
if (avctx->context_model == 1) {
s->context = avctx->context_model;
-        if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
+        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
av_log(avctx, AV_LOG_ERROR,
"context=1 is not compatible with "
"2 pass huffyuv encoding\n");
@@ -454,7 +454,7 @@ static int encode_422_bitstream(HYuvContext *s, int offset, int count)
count /= 2;
-    if (s->flags & CODEC_FLAG_PASS1) {
+    if (s->flags & AV_CODEC_FLAG_PASS1) {
for(i = 0; i < count; i++) {
LOAD4;
s->stats[0][y0]++;
@@ -463,7 +463,7 @@ static int encode_422_bitstream(HYuvContext *s, int offset, int count)
s->stats[2][v0]++;
}
}
-    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
+    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
return 0;
if (s->context) {
for (i = 0; i < count; i++) {
@@ -539,7 +539,7 @@ static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
put_bits(&s->pb, 2, y1&3);
if (s->bps <= 8) {
-    if (s->flags & CODEC_FLAG_PASS1) {
+    if (s->flags & AV_CODEC_FLAG_PASS1) {
for (i = 0; i < count; i++) {
LOAD2;
STAT2;
@@ -549,7 +549,7 @@ static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
STATEND;
}
}
-    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
+    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
return 0;
if (s->context) {
@@ -575,7 +575,7 @@ static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
}
} else if (s->bps <= 14) {
int mask = s->n - 1;
-    if (s->flags & CODEC_FLAG_PASS1) {
+    if (s->flags & AV_CODEC_FLAG_PASS1) {
for (i = 0; i < count; i++) {
LOAD2_14;
STAT2;
@@ -585,7 +585,7 @@ static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
STATEND;
}
}
-    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
+    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
return 0;
if (s->context) {
@@ -610,7 +610,7 @@ static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
}
}
} else {
-    if (s->flags & CODEC_FLAG_PASS1) {
+    if (s->flags & AV_CODEC_FLAG_PASS1) {
for (i = 0; i < count; i++) {
LOAD2_16;
STAT2_16;
@@ -620,7 +620,7 @@ static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
STATEND_16;
}
}
-    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
+    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
return 0;
if (s->context) {
@@ -672,13 +672,13 @@ static int encode_gray_bitstream(HYuvContext *s, int count)
count /= 2;
-    if (s->flags & CODEC_FLAG_PASS1) {
+    if (s->flags & AV_CODEC_FLAG_PASS1) {
for (i = 0; i < count; i++) {
LOAD2;
STAT2;
}
}
-    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
+    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
return 0;
if (s->context) {
@@ -726,13 +726,13 @@ static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
if (planes == 4) \
put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
-    if ((s->flags & CODEC_FLAG_PASS1) &&
-        (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
+    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
+        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
for (i = 0; i < count; i++) {
LOAD_GBRA;
STAT_BGRA;
}
-    } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
+    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
for (i = 0; i < count; i++) {
LOAD_GBRA;
STAT_BGRA;
@@ -1000,7 +1000,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
put_bits(&s->pb, 15, 0);
size /= 4;
-    if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
+    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
int j;
char *p = avctx->stats_out;
char *end = p + STATS_OUT_SIZE;
@@ -1017,7 +1017,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
}
} else if (avctx->stats_out)
avctx->stats_out[0] = '\0';
-    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
+    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
flush_put_bits(&s->pb);
s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
}
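Hedged, illustrative aside (not part of the patch): the PASS1/PASS2 and FLAG2_NO_OUTPUT flags renamed above drive the generic two-pass workflow, in which the first pass publishes statistics in avctx->stats_out and the second pass consumes avctx->stats_in. A rough caller-side sketch, with enc_ctx and saved_stats as assumed names:

    /* Pass 1 (sketch): collect rate-control statistics, optionally discarding output. */
    enc_ctx->flags  |= AV_CODEC_FLAG_PASS1;
    enc_ctx->flags2 |= AV_CODEC_FLAG2_NO_OUTPUT;  /* encoder may then skip writing packets */
    /* ... encode all frames, then store the enc_ctx->stats_out string ... */

    /* Pass 2 (sketch): feed the saved statistics back to the encoder. */
    enc_ctx->flags |= AV_CODEC_FLAG_PASS2;
    enc_ctx->stats_in = saved_stats;              /* NUL-terminated string from pass 1 */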
@@ -256,7 +256,7 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
return ret;
}
ff_bswapdsp_init(&q->bdsp);
-    q->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
+    q->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
if (!q->fdsp) {
ff_fft_end(&q->fft);
@@ -453,7 +453,7 @@ void ff_h263_encode_mb(MpegEncContext * s,
int16_t pred_dc;
int16_t rec_intradc[6];
int16_t *dc_ptr[6];
-    const int interleaved_stats = s->avctx->flags & CODEC_FLAG_PASS1;
+    const int interleaved_stats = s->avctx->flags & AV_CODEC_FLAG_PASS1;
if (!s->mb_intra) {
/* compute cbp */
@@ -321,7 +321,7 @@ static int put_com(Jpeg2000EncoderContext *s, int compno)
{
int size = 4 + strlen(LIBAVCODEC_IDENT);
-    if (s->avctx->flags & CODEC_FLAG_BITEXACT)
+    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
return 0;
if (s->buf_end - s->buf < size + 2)
@@ -467,7 +467,7 @@ static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
}
c->transform = bytestream2_get_byteu(&s->g); // DWT transformation type
/* set integer 9/7 DWT in case of BITEXACT flag */
-    if ((s->avctx->flags & CODEC_FLAG_BITEXACT) && (c->transform == FF_DWT97))
+    if ((s->avctx->flags & AV_CODEC_FLAG_BITEXACT) && (c->transform == FF_DWT97))
c->transform = FF_DWT97_INT;
else if (c->transform == FF_DWT53) {
s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
@@ -63,7 +63,7 @@ static av_cold int aacPlus_encode_init(AVCodecContext *avctx)
aacplus_cfg->bitRate = avctx->bit_rate;
aacplus_cfg->bandWidth = avctx->cutoff;
-    aacplus_cfg->outputFormat = !(avctx->flags & CODEC_FLAG_GLOBAL_HEADER);
+    aacplus_cfg->outputFormat = !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
aacplus_cfg->inputFormat = avctx->sample_fmt == AV_SAMPLE_FMT_FLT ? AACPLUS_INPUT_FLOAT : AACPLUS_INPUT_16BIT;
if (!aacplusEncSetConfiguration(s->aacplus_handle, aacplus_cfg)) {
av_log(avctx, AV_LOG_ERROR, "libaacplus doesn't support this output format!\n");
@@ -74,7 +74,7 @@ static av_cold int aacPlus_encode_init(AVCodecContext *avctx)
/* Set decoder specific info */
avctx->extradata_size = 0;
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
unsigned char *buffer = NULL;
unsigned long decoder_specific_info_size;
@@ -206,7 +206,7 @@ static av_cold int dcadec_init(AVCodecContext *avctx)
int flags = 0;
/* Affects only lossy DTS profiles. DTS-HD MA is always bitexact */
-    if (avctx->flags & CODEC_FLAG_BITEXACT)
+    if (avctx->flags & AV_CODEC_FLAG_BITEXACT)
flags |= DCADEC_FLAG_CORE_BIT_EXACT;
if (avctx->request_channel_layout > 0 && avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE) {
@@ -117,7 +117,7 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
faac_cfg->allowMidside = 1;
faac_cfg->bitRate = avctx->bit_rate / avctx->channels;
faac_cfg->bandWidth = avctx->cutoff;
-    if(avctx->flags & CODEC_FLAG_QSCALE) {
+    if(avctx->flags & AV_CODEC_FLAG_QSCALE) {
faac_cfg->bitRate = 0;
faac_cfg->quantqual = avctx->global_quality / FF_QP2LAMBDA;
}
@@ -131,7 +131,7 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
/* Set decoder specific info */
avctx->extradata_size = 0;
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
unsigned char *buffer = NULL;
unsigned long decoder_specific_info_size;
@@ -184,7 +184,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
goto error;
}
-    if (avctx->flags & CODEC_FLAG_QSCALE || s->vbr) {
+    if (avctx->flags & AV_CODEC_FLAG_QSCALE || s->vbr) {
int mode = s->vbr ? s->vbr : avctx->global_quality;
if (mode < 1 || mode > 5) {
av_log(avctx, AV_LOG_WARNING,
@@ -224,7 +224,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
/* Choose bitstream format - if global header is requested, use
* raw access units, otherwise use ADTS. */
if ((err = aacEncoder_SetParam(s->handle, AACENC_TRANSMUX,
-                                   avctx->flags & CODEC_FLAG_GLOBAL_HEADER ? 0 : s->latm ? 10 : 2)) != AACENC_OK) {
+                                   avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER ? 0 : s->latm ? 10 : 2)) != AACENC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set the transmux format: %s\n",
aac_get_error(err));
goto error;
@@ -243,7 +243,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
* if using mp4 mode (raw access units, with global header) and
* implicit signaling if using ADTS. */
if (s->signaling < 0)
-        s->signaling = avctx->flags & CODEC_FLAG_GLOBAL_HEADER ? 2 : 0;
+        s->signaling = avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER ? 2 : 0;
if ((err = aacEncoder_SetParam(s->handle, AACENC_SIGNALING_MODE,
s->signaling)) != AACENC_OK) {
@@ -289,7 +289,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
avctx->initial_padding = info.encoderDelay;
ff_af_queue_init(avctx, &s->afq);
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
avctx->extradata_size = info.confSize;
avctx->extradata = av_mallocz(avctx->extradata_size +
FF_INPUT_BUFFER_PADDING_SIZE);
@@ -111,7 +111,7 @@ static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
lame_set_quality(s->gfp, avctx->compression_level);
/* rate control */
-    if (avctx->flags & CODEC_FLAG_QSCALE) { // VBR
+    if (avctx->flags & AV_CODEC_FLAG_QSCALE) { // VBR
lame_set_VBR(s->gfp, vbr_default);
lame_set_VBR_quality(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA);
} else {
@@ -159,7 +159,7 @@ static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
if (ret < 0)
goto error;
-    s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
+    s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
if (!s->fdsp) {
ret = AVERROR(ENOMEM);
goto error;
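Hedged, illustrative aside (not part of the patch): several encoders above (libfaac, libfdk-aac, libmp3lame, and further down libvorbis and libtwolame) switch to VBR when AV_CODEC_FLAG_QSCALE is set and read the quality from avctx->global_quality. Caller-side sketch, with enc_ctx as an assumed name and 5 as an arbitrary example quality:

    /* Sketch only: quality-based (VBR) encoding via the renamed QSCALE flag. */
    enc_ctx->flags         |= AV_CODEC_FLAG_QSCALE;
    enc_ctx->global_quality = 5 * FF_QP2LAMBDA;   /* encoders divide by FF_QP2LAMBDA again */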
@@ -137,7 +137,7 @@ static av_cold int svc_encode_init(AVCodecContext *avctx)
goto fail;
}
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
SFrameBSInfo fbi = { 0 };
int i, size = 0;
(*s->encoder)->EncodeParameterSets(s->encoder, &fbi);
@@ -192,7 +192,7 @@ static int svc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
// frames have two layers, where the first layer contains the SPS/PPS.
// If using global headers, don't include the SPS/PPS in the returned
// packet - thus, only return one layer.
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
first_layer = fbi.iLayerNum - 1;
for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
@@ -175,7 +175,7 @@ static av_cold int libschroedinger_encode_init(AVCodecContext *avctx)
}
/* FIXME - Need to handle SCHRO_ENCODER_RATE_CONTROL_LOW_DELAY. */
-    if (avctx->flags & CODEC_FLAG_QSCALE) {
+    if (avctx->flags & AV_CODEC_FLAG_QSCALE) {
if (!avctx->global_quality) {
/* lossless coding */
schro_encoder_setting_set_double(p_schro_params->encoder,
@@ -202,14 +202,14 @@ static av_cold int libschroedinger_encode_init(AVCodecContext *avctx)
"bitrate", avctx->bit_rate);
}
-    if (avctx->flags & CODEC_FLAG_INTERLACED_ME)
+    if (avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)
/* All material can be coded as interlaced or progressive
irrespective of the type of source material. */
schro_encoder_setting_set_double(p_schro_params->encoder,
"interlaced_coding", 1);
schro_encoder_setting_set_double(p_schro_params->encoder, "open_gop",
-                                     !(avctx->flags & CODEC_FLAG_CLOSED_GOP));
+                                     !(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
/* FIXME: Signal range hardcoded to 8-bit data until both libschroedinger
* and libdirac support other bit-depth data. */
@@ -40,7 +40,7 @@
* used to set the encoding mode.
*
* Rate Control
- * VBR mode is turned on by setting CODEC_FLAG_QSCALE in avctx->flags.
+ * VBR mode is turned on by setting AV_CODEC_FLAG_QSCALE in avctx->flags.
* avctx->global_quality is used to set the encoding quality.
* For CBR mode, avctx->bit_rate can be used to set the constant bitrate.
* Alternatively, the 'cbr_quality' option can be set from 0 to 10 to set
@@ -177,7 +177,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
speex_init_header(&s->header, avctx->sample_rate, avctx->channels, mode);
/* rate control method and parameters */
-    if (avctx->flags & CODEC_FLAG_QSCALE) {
+    if (avctx->flags & AV_CODEC_FLAG_QSCALE) {
/* VBR */
s->header.vbr = 1;
s->vad = 1; /* VAD is always implicitly activated for VBR */
@@ -210,7 +210,7 @@ static av_cold int encode_init(AVCodecContext* avc_context)
}
avcodec_get_chroma_sub_sample(avc_context->pix_fmt, &h->uv_hshift, &h->uv_vshift);
-    if (avc_context->flags & CODEC_FLAG_QSCALE) {
+    if (avc_context->flags & AV_CODEC_FLAG_QSCALE) {
/* Clip global_quality in QP units to the [0 - 10] range
to be consistent with the libvorbis implementation.
Theora accepts a quality parameter which is an int value in
@@ -241,10 +241,10 @@ static av_cold int encode_init(AVCodecContext* avc_context)
}
// need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers
-    if (avc_context->flags & CODEC_FLAG_PASS1) {
+    if (avc_context->flags & AV_CODEC_FLAG_PASS1) {
if ((ret = get_stats(avc_context, 0)) < 0)
return ret;
-    } else if (avc_context->flags & CODEC_FLAG_PASS2) {
+    } else if (avc_context->flags & AV_CODEC_FLAG_PASS2) {
if ((ret = submit_stats(avc_context)) < 0)
return ret;
}
@@ -281,7 +281,7 @@ static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
// EOS, finish and get 1st pass stats if applicable
if (!frame) {
th_encode_packetout(h->t_state, 1, &o_packet);
-        if (avc_context->flags & CODEC_FLAG_PASS1)
+        if (avc_context->flags & AV_CODEC_FLAG_PASS1)
if ((ret = get_stats(avc_context, 1)) < 0)
return ret;
return 0;
@@ -295,7 +295,7 @@ static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
t_yuv_buffer[i].data = frame->data[i];
}
-    if (avc_context->flags & CODEC_FLAG_PASS2)
+    if (avc_context->flags & AV_CODEC_FLAG_PASS2)
if ((ret = submit_stats(avc_context)) < 0)
return ret;
@@ -318,7 +318,7 @@ static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
return AVERROR_EXTERNAL;
}
-    if (avc_context->flags & CODEC_FLAG_PASS1)
+    if (avc_context->flags & AV_CODEC_FLAG_PASS1)
if ((ret = get_stats(avc_context, 0)) < 0)
return ret;
@@ -81,7 +81,7 @@ static av_cold int twolame_encode_init(AVCodecContext *avctx)
if (!avctx->bit_rate)
avctx->bit_rate = avctx->sample_rate < 28000 ? 160000 : 384000;
-    if (avctx->flags & CODEC_FLAG_QSCALE || !avctx->bit_rate) {
+    if (avctx->flags & AV_CODEC_FLAG_QSCALE || !avctx->bit_rate) {
twolame_set_VBR(s->glopts, TRUE);
twolame_set_VBR_level(s->glopts,
avctx->global_quality / (float) FF_QP2LAMBDA);
@@ -85,7 +85,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
params.sampleRate = avctx->sample_rate;
params.bitRate = avctx->bit_rate;
params.nChannels = avctx->channels;
-    params.adtsUsed = !(avctx->flags & CODEC_FLAG_GLOBAL_HEADER);
+    params.adtsUsed = !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
if (s->codec_api.SetParam(s->handle, VO_PID_AAC_ENCPARAM, &params)
!= VO_ERR_NONE) {
av_log(avctx, AV_LOG_ERROR, "Unable to set encoding parameters\n");
@@ -102,7 +102,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
ret = AVERROR(ENOSYS);
goto error;
}
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
avctx->extradata_size = 2;
avctx->extradata = av_mallocz(avctx->extradata_size +
FF_INPUT_BUFFER_PADDING_SIZE);
@@ -86,14 +86,14 @@ static av_cold int libvorbis_setup(vorbis_info *vi, AVCodecContext *avctx)
double cfreq;
int ret;
-    if (avctx->flags & CODEC_FLAG_QSCALE || !avctx->bit_rate) {
+    if (avctx->flags & AV_CODEC_FLAG_QSCALE || !avctx->bit_rate) {
/* variable bitrate
* NOTE: we use the oggenc range of -1 to 10 for global_quality for
* user convenience, but libvorbis uses -0.1 to 1.0.
*/
float q = avctx->global_quality / (float)FF_QP2LAMBDA;
/* default to 3 if the user did not set quality or bitrate */
-        if (!(avctx->flags & CODEC_FLAG_QSCALE))
+        if (!(avctx->flags & AV_CODEC_FLAG_QSCALE))
q = 3.0;
if ((ret = vorbis_encode_setup_vbr(vi, avctx->channels,
avctx->sample_rate,
@@ -218,7 +218,7 @@ static av_cold int libvorbis_encode_init(AVCodecContext *avctx)
}
vorbis_comment_init(&s->vc);
-    if (!(avctx->flags & CODEC_FLAG_BITEXACT))
+    if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT))
vorbis_comment_add_tag(&s->vc, "encoder", LIBAVCODEC_IDENT);
if ((ret = vorbis_analysis_headerout(&s->vd, &s->vc, &header, &header_comm,
@@ -382,7 +382,7 @@ static av_cold int vpx_init(AVCodecContext *avctx,
VP8Context *ctx = avctx->priv_data;
struct vpx_codec_enc_cfg enccfg = { 0 };
struct vpx_codec_enc_cfg enccfg_alpha;
-    vpx_codec_flags_t flags = (avctx->flags & CODEC_FLAG_PSNR) ? VPX_CODEC_USE_PSNR : 0;
+    vpx_codec_flags_t flags = (avctx->flags & AV_CODEC_FLAG_PSNR) ? VPX_CODEC_USE_PSNR : 0;
int res;
vpx_img_fmt_t img_fmt = VPX_IMG_FMT_I420;
#if CONFIG_LIBVPX_VP9_ENCODER
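Hedged, illustrative aside (not part of the patch): AV_CODEC_FLAG_PSNR, mapped above onto VPX_CODEC_USE_PSNR (and onto b_psnr/bEnablePsnr in the x264/x265 hunks below), asks an encoder to compute distortion statistics for the coded frames. Caller-side sketch, with enc_ctx as an assumed name:

    /* Sketch only: request PSNR/error statistics from the encoder. */
    enc_ctx->flags |= AV_CODEC_FLAG_PSNR;         /* renamed from CODEC_FLAG_PSNR */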
@@ -423,9 +423,9 @@ static av_cold int vpx_init(AVCodecContext *avctx,
enccfg.g_threads      = avctx->thread_count;
enccfg.g_lag_in_frames= ctx->lag_in_frames;
-    if (avctx->flags & CODEC_FLAG_PASS1)
+    if (avctx->flags & AV_CODEC_FLAG_PASS1)
enccfg.g_pass = VPX_RC_FIRST_PASS;
-    else if (avctx->flags & CODEC_FLAG_PASS2)
+    else if (avctx->flags & AV_CODEC_FLAG_PASS2)
enccfg.g_pass = VPX_RC_LAST_PASS;
else
enccfg.g_pass = VPX_RC_ONE_PASS;
@@ -914,7 +914,7 @@ static int vp8_encode(AVCodecContext *avctx, AVPacket *pkt,
coded_size = queue_frames(avctx, pkt);
-    if (!frame && avctx->flags & CODEC_FLAG_PASS1) {
+    if (!frame && avctx->flags & AV_CODEC_FLAG_PASS1) {
unsigned int b64_size = AV_BASE64_SIZE(ctx->twopass_stats.sz);
avctx->stats_out = av_malloc(b64_size);
@@ -407,7 +407,7 @@ static av_cold int X264_init(AVCodecContext *avctx)
#endif
x264_param_default(&x4->params);
-    x4->params.b_deblocking_filter         = avctx->flags & CODEC_FLAG_LOOP_FILTER;
+    x4->params.b_deblocking_filter         = avctx->flags & AV_CODEC_FLAG_LOOP_FILTER;
if (x4->preset || x4->tune)
if (x264_param_default_preset(&x4->params, x4->preset, x4->tune) < 0) {
@@ -440,8 +440,8 @@ static av_cold int X264_init(AVCodecContext *avctx)
}
x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000;
x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000;
-    x4->params.rc.b_stat_write      = avctx->flags & CODEC_FLAG_PASS1;
-    if (avctx->flags & CODEC_FLAG_PASS2) {
+    x4->params.rc.b_stat_write      = avctx->flags & AV_CODEC_FLAG_PASS1;
+    if (avctx->flags & AV_CODEC_FLAG_PASS2) {
x4->params.rc.b_stat_read = 1;
} else {
if (x4->crf >= 0) {
@@ -663,15 +663,15 @@ FF_ENABLE_DEPRECATION_WARNINGS
x4->params.i_fps_num = avctx->time_base.den;
x4->params.i_fps_den = avctx->time_base.num * avctx->ticks_per_frame;
-    x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR;
+    x4->params.analyse.b_psnr = avctx->flags & AV_CODEC_FLAG_PSNR;
x4->params.i_threads      = avctx->thread_count;
if (avctx->thread_type)
x4->params.b_sliced_threads = avctx->thread_type == FF_THREAD_SLICE;
-    x4->params.b_interlaced   = avctx->flags & CODEC_FLAG_INTERLACED_DCT;
+    x4->params.b_interlaced   = avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT;
-    x4->params.b_open_gop     = !(avctx->flags & CODEC_FLAG_CLOSED_GOP);
+    x4->params.b_open_gop     = !(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP);
x4->params.i_slice_count  = avctx->slices;
@@ -687,7 +687,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
x4->params.vui.i_transfer = avctx->color_trc;
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
x4->params.b_repeat_headers = 0;
if(x4->x264opts){
@@ -731,7 +731,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (!x4->enc)
return AVERROR_EXTERNAL;
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
x264_nal_t *nal;
uint8_t *p;
int nnal, s, i;
@@ -119,7 +119,7 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
ctx->params->fpsDenom     = avctx->time_base.num * avctx->ticks_per_frame;
ctx->params->sourceWidth  = avctx->width;
ctx->params->sourceHeight = avctx->height;
-    ctx->params->bEnablePsnr  = !!(avctx->flags & CODEC_FLAG_PSNR);
+    ctx->params->bEnablePsnr  = !!(avctx->flags & AV_CODEC_FLAG_PSNR);
if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&
avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||
@@ -179,7 +179,7 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
ctx->params->rc.rateControlMode = X265_RC_ABR;
}
-    if (!(avctx->flags & CODEC_FLAG_GLOBAL_HEADER))
+    if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
ctx->params->bRepeatHeaders = 1;
if (ctx->x265_opts) {
@@ -214,7 +214,7 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
x265_nal *nal;
int nnal;
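Hedged, illustrative aside (not part of the patch): with AV_CODEC_FLAG_GLOBAL_HEADER set, the encoders above stop repeating parameter sets in-band (b_repeat_headers/bRepeatHeaders are cleared) and instead publish them once in avctx->extradata during init. Caller-side sketch, with enc_ctx and encoder as assumed names:

    /* Sketch only: request out-of-band headers and inspect them after opening. */
    enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    if (avcodec_open2(enc_ctx, encoder, NULL) == 0 && enc_ctx->extradata_size > 0) {
        /* enc_ctx->extradata now holds the codec's global headers (e.g. SPS/PPS) */
    }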
@@ -249,8 +249,8 @@ static av_cold int XAVS_init(AVCodecContext *avctx) | |||||
} | } | ||||
x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000; | x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000; | ||||
x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000; | x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000; | ||||
x4->params.rc.b_stat_write = avctx->flags & CODEC_FLAG_PASS1; | |||||
if (avctx->flags & CODEC_FLAG_PASS2) { | |||||
x4->params.rc.b_stat_write = avctx->flags & AV_CODEC_FLAG_PASS1; | |||||
if (avctx->flags & AV_CODEC_FLAG_PASS2) { | |||||
x4->params.rc.b_stat_read = 1; | x4->params.rc.b_stat_read = 1; | ||||
} else { | } else { | ||||
if (x4->crf >= 0) { | if (x4->crf >= 0) { | ||||
@@ -321,7 +321,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
x4->params.i_scenecut_threshold = avctx->scenechange_threshold; | x4->params.i_scenecut_threshold = avctx->scenechange_threshold; | ||||
// x4->params.b_deblocking_filter = avctx->flags & CODEC_FLAG_LOOP_FILTER; | |||||
// x4->params.b_deblocking_filter = avctx->flags & AV_CODEC_FLAG_LOOP_FILTER; | |||||
x4->params.rc.i_qp_min = avctx->qmin; | x4->params.rc.i_qp_min = avctx->qmin; | ||||
x4->params.rc.i_qp_max = avctx->qmax; | x4->params.rc.i_qp_max = avctx->qmax; | ||||
@@ -346,7 +346,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA; | x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA; | ||||
/* AVS P2 only enables 8x8 transform */ | /* AVS P2 only enables 8x8 transform */ | ||||
x4->params.analyse.b_transform_8x8 = 1; //avctx->flags2 & CODEC_FLAG2_8X8DCT; | |||||
x4->params.analyse.b_transform_8x8 = 1; //avctx->flags2 & AV_CODEC_FLAG2_8X8DCT; | |||||
x4->params.analyse.i_trellis = avctx->trellis; | x4->params.analyse.i_trellis = avctx->trellis; | ||||
x4->params.analyse.i_noise_reduction = avctx->noise_reduction; | x4->params.analyse.i_noise_reduction = avctx->noise_reduction; | ||||
@@ -371,12 +371,12 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
x4->params.rc.f_pb_factor = avctx->b_quant_factor; | x4->params.rc.f_pb_factor = avctx->b_quant_factor; | ||||
x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset; | x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset; | ||||
x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR; | |||||
x4->params.analyse.b_psnr = avctx->flags & AV_CODEC_FLAG_PSNR; | |||||
x4->params.i_log_level = XAVS_LOG_DEBUG; | x4->params.i_log_level = XAVS_LOG_DEBUG; | ||||
x4->params.i_threads = avctx->thread_count; | x4->params.i_threads = avctx->thread_count; | ||||
x4->params.b_interlaced = avctx->flags & CODEC_FLAG_INTERLACED_DCT; | |||||
x4->params.b_interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT; | |||||
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) | |||||
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) | |||||
x4->params.b_repeat_headers = 0; | x4->params.b_repeat_headers = 0; | ||||
x4->enc = xavs_encoder_open(&x4->params); | x4->enc = xavs_encoder_open(&x4->params); | ||||
@@ -388,7 +388,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
/* TAG: Do we have GLOBAL HEADER in AVS */ | /* TAG: Do we have GLOBAL HEADER in AVS */ | ||||
/* We Have PPS and SPS in AVS */ | /* We Have PPS and SPS in AVS */ | ||||
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER && 0) { | |||||
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && 0) { | |||||
xavs_nal_t *nal; | xavs_nal_t *nal; | ||||
int nnal, s, i, size; | int nnal, s, i, size; | ||||
uint8_t *p; | uint8_t *p; | ||||
@@ -382,13 +382,13 @@ static av_cold int xvid_encode_init(AVCodecContext *avctx) | |||||
/* Bring in VOP flags from ffmpeg command-line */ | /* Bring in VOP flags from ffmpeg command-line */ | ||||
x->vop_flags = XVID_VOP_HALFPEL; /* Bare minimum quality */ | x->vop_flags = XVID_VOP_HALFPEL; /* Bare minimum quality */ | ||||
if (xvid_flags & CODEC_FLAG_4MV) | |||||
if (xvid_flags & AV_CODEC_FLAG_4MV) | |||||
x->vop_flags |= XVID_VOP_INTER4V; /* Level 3 */ | x->vop_flags |= XVID_VOP_INTER4V; /* Level 3 */ | ||||
if (avctx->trellis) | if (avctx->trellis) | ||||
x->vop_flags |= XVID_VOP_TRELLISQUANT; /* Level 5 */ | x->vop_flags |= XVID_VOP_TRELLISQUANT; /* Level 5 */ | ||||
if (xvid_flags & CODEC_FLAG_AC_PRED) | |||||
if (xvid_flags & AV_CODEC_FLAG_AC_PRED) | |||||
x->vop_flags |= XVID_VOP_HQACPRED; /* Level 6 */ | x->vop_flags |= XVID_VOP_HQACPRED; /* Level 6 */ | ||||
if (xvid_flags & CODEC_FLAG_GRAY) | |||||
if (xvid_flags & AV_CODEC_FLAG_GRAY) | |||||
x->vop_flags |= XVID_VOP_GREYSCALE; | x->vop_flags |= XVID_VOP_GREYSCALE; | ||||
/* Decide which ME quality setting to use */ | /* Decide which ME quality setting to use */ | ||||
@@ -462,7 +462,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
x->vol_flags |= XVID_VOL_GMC; | x->vol_flags |= XVID_VOL_GMC; | ||||
x->me_flags |= XVID_ME_GME_REFINE; | x->me_flags |= XVID_ME_GME_REFINE; | ||||
} | } | ||||
if (xvid_flags & CODEC_FLAG_QPEL) { | |||||
if (xvid_flags & AV_CODEC_FLAG_QPEL) { | |||||
x->vol_flags |= XVID_VOL_QUARTERPEL; | x->vol_flags |= XVID_VOL_QUARTERPEL; | ||||
x->me_flags |= XVID_ME_QUARTERPELREFINE16; | x->me_flags |= XVID_ME_QUARTERPELREFINE16; | ||||
if (x->vop_flags & XVID_VOP_INTER4V) | if (x->vop_flags & XVID_VOP_INTER4V) | ||||
@@ -514,7 +514,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
x->old_twopassbuffer = NULL; | x->old_twopassbuffer = NULL; | ||||
x->twopassfile = NULL; | x->twopassfile = NULL; | ||||
if (xvid_flags & CODEC_FLAG_PASS1) { | |||||
if (xvid_flags & AV_CODEC_FLAG_PASS1) { | |||||
rc2pass1.version = XVID_VERSION; | rc2pass1.version = XVID_VERSION; | ||||
rc2pass1.context = x; | rc2pass1.context = x; | ||||
x->twopassbuffer = av_malloc(BUFFER_SIZE); | x->twopassbuffer = av_malloc(BUFFER_SIZE); | ||||
@@ -530,7 +530,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
plugins[xvid_enc_create.num_plugins].func = xvid_ff_2pass; | plugins[xvid_enc_create.num_plugins].func = xvid_ff_2pass; | ||||
plugins[xvid_enc_create.num_plugins].param = &rc2pass1; | plugins[xvid_enc_create.num_plugins].param = &rc2pass1; | ||||
xvid_enc_create.num_plugins++; | xvid_enc_create.num_plugins++; | ||||
} else if (xvid_flags & CODEC_FLAG_PASS2) { | |||||
} else if (xvid_flags & AV_CODEC_FLAG_PASS2) { | |||||
rc2pass2.version = XVID_VERSION; | rc2pass2.version = XVID_VERSION; | ||||
rc2pass2.bitrate = avctx->bit_rate; | rc2pass2.bitrate = avctx->bit_rate; | ||||
@@ -561,7 +561,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
plugins[xvid_enc_create.num_plugins].func = xvid_plugin_2pass2; | plugins[xvid_enc_create.num_plugins].func = xvid_plugin_2pass2; | ||||
plugins[xvid_enc_create.num_plugins].param = &rc2pass2; | plugins[xvid_enc_create.num_plugins].param = &rc2pass2; | ||||
xvid_enc_create.num_plugins++; | xvid_enc_create.num_plugins++; | ||||
} else if (!(xvid_flags & CODEC_FLAG_QSCALE)) { | |||||
} else if (!(xvid_flags & AV_CODEC_FLAG_QSCALE)) { | |||||
/* Single Pass Bitrate Control! */ | /* Single Pass Bitrate Control! */ | ||||
single.version = XVID_VERSION; | single.version = XVID_VERSION; | ||||
single.bitrate = avctx->bit_rate; | single.bitrate = avctx->bit_rate; | ||||
@@ -620,7 +620,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
xvid_enc_create.max_key_interval = 240; /* Xvid's best default */ | xvid_enc_create.max_key_interval = 240; /* Xvid's best default */ | ||||
/* Quants */ | /* Quants */ | ||||
if (xvid_flags & CODEC_FLAG_QSCALE) | |||||
if (xvid_flags & AV_CODEC_FLAG_QSCALE) | |||||
x->qscale = 1; | x->qscale = 1; | ||||
else | else | ||||
x->qscale = 0; | x->qscale = 0; | ||||
@@ -666,13 +666,13 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
/* Misc Settings */ | /* Misc Settings */ | ||||
xvid_enc_create.frame_drop_ratio = 0; | xvid_enc_create.frame_drop_ratio = 0; | ||||
xvid_enc_create.global = 0; | xvid_enc_create.global = 0; | ||||
if (xvid_flags & CODEC_FLAG_CLOSED_GOP) | |||||
if (xvid_flags & AV_CODEC_FLAG_CLOSED_GOP) | |||||
xvid_enc_create.global |= XVID_GLOBAL_CLOSED_GOP; | xvid_enc_create.global |= XVID_GLOBAL_CLOSED_GOP; | ||||
/* Determines which codec mode we are operating in */ | /* Determines which codec mode we are operating in */ | ||||
avctx->extradata = NULL; | avctx->extradata = NULL; | ||||
avctx->extradata_size = 0; | avctx->extradata_size = 0; | ||||
if (xvid_flags & CODEC_FLAG_GLOBAL_HEADER) { | |||||
if (xvid_flags & AV_CODEC_FLAG_GLOBAL_HEADER) { | |||||
/* In this case, we are claiming to be MPEG4 */ | /* In this case, we are claiming to be MPEG4 */ | ||||
x->quicktime_format = 1; | x->quicktime_format = 1; | ||||
avctx->codec_id = AV_CODEC_ID_MPEG4; | avctx->codec_id = AV_CODEC_ID_MPEG4; | ||||
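Note: the hunks above only rename the constants the libxvid wrapper tests; the mapping onto XVID_VOP_*/XVID_VOL_* behaviour is unchanged. For orientation, a minimal caller-side sketch — not part of this commit; codec name, dimensions and bitrate are placeholder values, and av_register_all() is assumed to have been called — could set the renamed flags like this:

#include <libavcodec/avcodec.h>

static int open_xvid_encoder(AVCodecContext **penc)
{
    AVCodec *codec = avcodec_find_encoder_by_name("libxvid");
    AVCodecContext *enc;

    if (!codec)
        return AVERROR_ENCODER_NOT_FOUND;
    enc = avcodec_alloc_context3(codec);
    if (!enc)
        return AVERROR(ENOMEM);

    enc->width     = 640;                       /* placeholder dimensions   */
    enc->height    = 480;
    enc->pix_fmt   = AV_PIX_FMT_YUV420P;
    enc->time_base = (AVRational){ 1, 25 };
    enc->bit_rate  = 800000;                    /* placeholder bitrate      */

    /* The renamed flags are what xvid_encode_init() above inspects. */
    enc->flags |= AV_CODEC_FLAG_4MV             /* -> XVID_VOP_INTER4V      */
               |  AV_CODEC_FLAG_AC_PRED         /* -> XVID_VOP_HQACPRED     */
               |  AV_CODEC_FLAG_CLOSED_GOP;     /* -> XVID_GLOBAL_CLOSED_GOP */

    *penc = enc;
    return avcodec_open2(enc, codec, NULL);
}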
@@ -160,7 +160,7 @@ static inline void idct_put(MDECContext *a, AVFrame *frame, int mb_x, int mb_y) | |||||
a->idsp.idct_put(dest_y + 8 * linesize, linesize, block[2]); | a->idsp.idct_put(dest_y + 8 * linesize, linesize, block[2]); | ||||
a->idsp.idct_put(dest_y + 8 * linesize + 8, linesize, block[3]); | a->idsp.idct_put(dest_y + 8 * linesize + 8, linesize, block[3]); | ||||
if (!(a->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!(a->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
a->idsp.idct_put(dest_cb, frame->linesize[1], block[4]); | a->idsp.idct_put(dest_cb, frame->linesize[1], block[4]); | ||||
a->idsp.idct_put(dest_cr, frame->linesize[2], block[5]); | a->idsp.idct_put(dest_cr, frame->linesize[2], block[5]); | ||||
} | } | ||||
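Note: AV_CODEC_FLAG_GRAY, as checked above, lets a decoder skip chroma reconstruction. A hedged usage sketch, not from this diff — it assumes libavcodec was configured with --enable-gray, which defines CONFIG_GRAY; without it the decode-side checks above fall through to full colour decoding:

AVCodec        *dec = avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO);
AVCodecContext *c   = avcodec_alloc_context3(dec);
c->flags |= AV_CODEC_FLAG_GRAY;   /* skip the chroma IDCT/MC paths gated above */
avcodec_open2(c, dec, NULL);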
@@ -135,7 +135,7 @@ static void jpeg_put_comments(AVCodecContext *avctx, PutBitContext *p) | |||||
} | } | ||||
/* comment */ | /* comment */ | ||||
if (!(avctx->flags & CODEC_FLAG_BITEXACT)) { | |||||
if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) { | |||||
put_marker(p, COM); | put_marker(p, COM); | ||||
flush_put_bits(p); | flush_put_bits(p); | ||||
ptr = put_bits_ptr(p); | ptr = put_bits_ptr(p); | ||||
@@ -127,7 +127,7 @@ int main(int argc, char **argv) | |||||
printf("ffmpeg motion test\n"); | printf("ffmpeg motion test\n"); | ||||
ctx = avcodec_alloc_context3(NULL); | ctx = avcodec_alloc_context3(NULL); | ||||
ctx->flags |= CODEC_FLAG_BITEXACT; | |||||
ctx->flags |= AV_CODEC_FLAG_BITEXACT; | |||||
av_force_cpu_flags(0); | av_force_cpu_flags(0); | ||||
memset(&cctx, 0, sizeof(cctx)); | memset(&cctx, 0, sizeof(cctx)); | ||||
ff_me_cmp_init(&cctx, ctx); | ff_me_cmp_init(&cctx, ctx); | ||||
@@ -99,7 +99,7 @@ static inline void init_ref(MotionEstContext *c, uint8_t *src[3], uint8_t *ref[3 | |||||
} | } | ||||
static int get_flags(MotionEstContext *c, int direct, int chroma){ | static int get_flags(MotionEstContext *c, int direct, int chroma){ | ||||
return ((c->avctx->flags&CODEC_FLAG_QPEL) ? FLAG_QPEL : 0) | |||||
return ((c->avctx->flags&AV_CODEC_FLAG_QPEL) ? FLAG_QPEL : 0) | |||||
+ (direct ? FLAG_DIRECT : 0) | + (direct ? FLAG_DIRECT : 0) | ||||
+ (chroma ? FLAG_CHROMA : 0); | + (chroma ? FLAG_CHROMA : 0); | ||||
} | } | ||||
@@ -352,7 +352,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
c->mb_flags = get_flags(c, 0, c->avctx->mb_cmp &FF_CMP_CHROMA); | c->mb_flags = get_flags(c, 0, c->avctx->mb_cmp &FF_CMP_CHROMA); | ||||
/*FIXME s->no_rounding b_type*/ | /*FIXME s->no_rounding b_type*/ | ||||
if (s->avctx->flags & CODEC_FLAG_QPEL) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_QPEL) { | |||||
c->sub_motion_search= qpel_motion_search; | c->sub_motion_search= qpel_motion_search; | ||||
c->qpel_avg = s->qdsp.avg_qpel_pixels_tab; | c->qpel_avg = s->qdsp.avg_qpel_pixels_tab; | ||||
if (s->no_rounding) | if (s->no_rounding) | ||||
@@ -1003,7 +1003,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, | |||||
mx <<=shift; | mx <<=shift; | ||||
my <<=shift; | my <<=shift; | ||||
} | } | ||||
if ((s->avctx->flags & CODEC_FLAG_4MV) | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_4MV) | |||||
&& !c->skip && varc>50<<8 && vard>10<<8){ | && !c->skip && varc>50<<8 && vard>10<<8){ | ||||
if(h263_mv4_search(s, mx, my, shift) < INT_MAX) | if(h263_mv4_search(s, mx, my, shift) < INT_MAX) | ||||
mb_type|=CANDIDATE_MB_TYPE_INTER4V; | mb_type|=CANDIDATE_MB_TYPE_INTER4V; | ||||
@@ -1011,7 +1011,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, | |||||
set_p_mv_tables(s, mx, my, 0); | set_p_mv_tables(s, mx, my, 0); | ||||
}else | }else | ||||
set_p_mv_tables(s, mx, my, 1); | set_p_mv_tables(s, mx, my, 1); | ||||
if ((s->avctx->flags & CODEC_FLAG_INTERLACED_ME) | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) | |||||
&& !c->skip){ //FIXME varc/d checks | && !c->skip){ //FIXME varc/d checks | ||||
if(interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 0) < INT_MAX) | if(interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 0) < INT_MAX) | ||||
mb_type |= CANDIDATE_MB_TYPE_INTER_I; | mb_type |= CANDIDATE_MB_TYPE_INTER_I; | ||||
@@ -1024,7 +1024,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, | |||||
if(c->avctx->me_sub_cmp != c->avctx->mb_cmp && !c->skip) | if(c->avctx->me_sub_cmp != c->avctx->mb_cmp && !c->skip) | ||||
dmin= get_mb_score(s, mx, my, 0, 0, 0, 16, 1); | dmin= get_mb_score(s, mx, my, 0, 0, 0, 16, 1); | ||||
if ((s->avctx->flags & CODEC_FLAG_4MV) | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_4MV) | |||||
&& !c->skip && varc>50<<8 && vard>10<<8){ | && !c->skip && varc>50<<8 && vard>10<<8){ | ||||
int dmin4= h263_mv4_search(s, mx, my, shift); | int dmin4= h263_mv4_search(s, mx, my, shift); | ||||
if(dmin4 < dmin){ | if(dmin4 < dmin){ | ||||
@@ -1032,7 +1032,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, | |||||
dmin=dmin4; | dmin=dmin4; | ||||
} | } | ||||
} | } | ||||
if ((s->avctx->flags & CODEC_FLAG_INTERLACED_ME) | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) | |||||
&& !c->skip){ //FIXME varc/d checks | && !c->skip){ //FIXME varc/d checks | ||||
int dmin_i= interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 0); | int dmin_i= interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 0); | ||||
if(dmin_i < dmin){ | if(dmin_i < dmin){ | ||||
@@ -1550,7 +1550,7 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, | |||||
fbmin= bidir_refine(s, mb_x, mb_y) + penalty_factor; | fbmin= bidir_refine(s, mb_x, mb_y) + penalty_factor; | ||||
ff_dlog(s, "%d %d %d %d\n", dmin, fmin, bmin, fbmin); | ff_dlog(s, "%d %d %d %d\n", dmin, fmin, bmin, fbmin); | ||||
if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) { | |||||
//FIXME mb type penalty | //FIXME mb type penalty | ||||
c->skip=0; | c->skip=0; | ||||
c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV; | c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV; | ||||
@@ -1681,7 +1681,7 @@ void ff_fix_long_p_mvs(MpegEncContext * s) | |||||
if(c->avctx->me_range && range > c->avctx->me_range) range= c->avctx->me_range; | if(c->avctx->me_range && range > c->avctx->me_range) range= c->avctx->me_range; | ||||
if (s->avctx->flags & CODEC_FLAG_4MV) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_4MV) { | |||||
const int wrap= s->b8_stride; | const int wrap= s->b8_stride; | ||||
/* clip / convert to intra 8x8 type MVs */ | /* clip / convert to intra 8x8 type MVs */ | ||||
@@ -844,7 +844,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) | |||||
ff_xvmc_pack_pblocks(s, -1); // inter are always full blocks | ff_xvmc_pack_pblocks(s, -1); // inter are always full blocks | ||||
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { | if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { | ||||
if (s->avctx->flags2 & CODEC_FLAG2_FAST) { | |||||
if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) { | |||||
for (i = 0; i < 6; i++) | for (i = 0; i < 6; i++) | ||||
mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i); | mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i); | ||||
} else { | } else { | ||||
@@ -1064,7 +1064,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) | |||||
ff_xvmc_pack_pblocks(s, cbp); | ff_xvmc_pack_pblocks(s, cbp); | ||||
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { | if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { | ||||
if (s->avctx->flags2 & CODEC_FLAG2_FAST) { | |||||
if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) { | |||||
for (i = 0; i < 6; i++) { | for (i = 0; i < 6; i++) { | ||||
if (cbp & 32) | if (cbp & 32) | ||||
mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i); | mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i); | ||||
@@ -1086,7 +1086,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) | |||||
} | } | ||||
} | } | ||||
} else { | } else { | ||||
if (s->avctx->flags2 & CODEC_FLAG2_FAST) { | |||||
if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) { | |||||
for (i = 0; i < 6; i++) { | for (i = 0; i < 6; i++) { | ||||
if (cbp & 32) | if (cbp & 32) | ||||
mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i); | mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i); | ||||
@@ -1235,7 +1235,7 @@ static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx) | |||||
MpegEncContext *s = &s1->mpeg_enc_ctx; | MpegEncContext *s = &s1->mpeg_enc_ctx; | ||||
const enum AVPixelFormat *pix_fmts; | const enum AVPixelFormat *pix_fmts; | ||||
if (CONFIG_GRAY && (avctx->flags & CODEC_FLAG_GRAY)) | |||||
if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY)) | |||||
return AV_PIX_FMT_GRAY8; | return AV_PIX_FMT_GRAY8; | ||||
if (s->chroma_format < 2) | if (s->chroma_format < 2) | ||||
@@ -1473,7 +1473,7 @@ static void mpeg_decode_sequence_extension(Mpeg1Context *s1) | |||||
s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10; | s->avctx->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10; | ||||
s->low_delay = get_bits1(&s->gb); | s->low_delay = get_bits1(&s->gb); | ||||
if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) | |||||
if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) | |||||
s->low_delay = 1; | s->low_delay = 1; | ||||
s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1; | s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1; | ||||
@@ -2196,7 +2196,7 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, | |||||
s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO; | s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO; | ||||
s->out_format = FMT_MPEG1; | s->out_format = FMT_MPEG1; | ||||
s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER | s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER | ||||
if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) | |||||
if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) | |||||
s->low_delay = 1; | s->low_delay = 1; | ||||
if (s->avctx->debug & FF_DEBUG_PICT_INFO) | if (s->avctx->debug & FF_DEBUG_PICT_INFO) | ||||
@@ -2649,7 +2649,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, | |||||
} | } | ||||
} | } | ||||
} | } | ||||
if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & CODEC_FLAG2_SHOW_ALL)) | |||||
if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) | |||||
s->sync = 1; | s->sync = 1; | ||||
if (!s2->next_picture_ptr) { | if (!s2->next_picture_ptr) { | ||||
/* Skip P-frames if we do not have a reference frame or | /* Skip P-frames if we do not have a reference frame or | ||||
@@ -2770,7 +2770,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, void *data, | |||||
return buf_size; | return buf_size; | ||||
} | } | ||||
if (s2->avctx->flags & CODEC_FLAG_TRUNCATED) { | |||||
if (s2->avctx->flags & AV_CODEC_FLAG_TRUNCATED) { | |||||
int next = ff_mpeg1_find_frame_end(&s2->parse_context, buf, | int next = ff_mpeg1_find_frame_end(&s2->parse_context, buf, | ||||
buf_size, NULL); | buf_size, NULL); | ||||
@@ -205,7 +205,7 @@ static av_cold int encode_init(AVCodecContext *avctx) | |||||
} | } | ||||
} | } | ||||
s->drop_frame_timecode = s->drop_frame_timecode || !!(avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE); | |||||
s->drop_frame_timecode = s->drop_frame_timecode || !!(avctx->flags2 & AV_CODEC_FLAG2_DROP_FRAME_TIMECODE); | |||||
if (s->drop_frame_timecode) | if (s->drop_frame_timecode) | ||||
s->tc.flags |= AV_TIMECODE_FLAG_DROPFRAME; | s->tc.flags |= AV_TIMECODE_FLAG_DROPFRAME; | ||||
if (s->drop_frame_timecode && s->frame_rate_index != 4) { | if (s->drop_frame_timecode && s->frame_rate_index != 4) { | ||||
@@ -384,7 +384,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) | |||||
put_bits(&s->pb, 1, 1); | put_bits(&s->pb, 1, 1); | ||||
put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); | put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); | ||||
put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); | put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); | ||||
put_bits(&s->pb, 1, !!(s->avctx->flags & CODEC_FLAG_CLOSED_GOP) || s->intra_only || !s->gop_picture_number); | |||||
put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) || s->intra_only || !s->gop_picture_number); | |||||
put_bits(&s->pb, 1, 0); // broken link | put_bits(&s->pb, 1, 0); // broken link | ||||
} | } | ||||
} | } | ||||
@@ -2228,7 +2228,7 @@ static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb) | |||||
s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */ | s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */ | ||||
if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay && | if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay && | ||||
ctx->vol_control_parameters == 0 && !(s->avctx->flags & CODEC_FLAG_LOW_DELAY)) { | |||||
ctx->vol_control_parameters == 0 && !(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)) { | |||||
av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n"); | av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n"); | ||||
s->low_delay = 0; | s->low_delay = 0; | ||||
} | } | ||||
@@ -2607,7 +2607,7 @@ int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb) | |||||
} | } | ||||
end: | end: | ||||
if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) | |||||
if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) | |||||
s->low_delay = 1; | s->low_delay = 1; | ||||
s->avctx->has_b_frames = !s->low_delay; | s->avctx->has_b_frames = !s->low_delay; | ||||
@@ -430,7 +430,7 @@ static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64], | |||||
int i; | int i; | ||||
if (scan_table) { | if (scan_table) { | ||||
if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT) { | |||||
if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) { | |||||
for (i = 0; i < 6; i++) | for (i = 0; i < 6; i++) | ||||
skip_put_bits(&s->pb, | skip_put_bits(&s->pb, | ||||
mpeg4_get_block_length(s, block[i], i, | mpeg4_get_block_length(s, block[i], i, | ||||
@@ -442,7 +442,7 @@ static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64], | |||||
intra_dc[i], scan_table[i], dc_pb, ac_pb); | intra_dc[i], scan_table[i], dc_pb, ac_pb); | ||||
} | } | ||||
} else { | } else { | ||||
if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT) { | |||||
if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) { | |||||
for (i = 0; i < 6; i++) | for (i = 0; i < 6; i++) | ||||
skip_put_bits(&s->pb, | skip_put_bits(&s->pb, | ||||
mpeg4_get_block_length(s, block[i], i, 0, | mpeg4_get_block_length(s, block[i], i, 0, | ||||
@@ -507,7 +507,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], | |||||
PutBitContext *const pb2 = s->data_partitioning ? &s->pb2 : &s->pb; | PutBitContext *const pb2 = s->data_partitioning ? &s->pb2 : &s->pb; | ||||
PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb; | PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb; | ||||
PutBitContext *const dc_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb; | PutBitContext *const dc_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb; | ||||
const int interleaved_stats = (s->avctx->flags & CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0; | |||||
const int interleaved_stats = (s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0; | |||||
if (!s->mb_intra) { | if (!s->mb_intra) { | ||||
int i, cbp; | int i, cbp; | ||||
@@ -832,7 +832,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], | |||||
for (i = 0; i < 6; i++) | for (i = 0; i < 6; i++) | ||||
dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1); | dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1); | ||||
if (s->avctx->flags & CODEC_FLAG_AC_PRED) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_AC_PRED) { | |||||
s->ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index); | s->ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index); | ||||
} else { | } else { | ||||
for (i = 0; i < 6; i++) | for (i = 0; i < 6; i++) | ||||
@@ -932,7 +932,7 @@ static void mpeg4_encode_gop_header(MpegEncContext *s) | |||||
put_bits(&s->pb, 1, 1); | put_bits(&s->pb, 1, 1); | ||||
put_bits(&s->pb, 6, seconds); | put_bits(&s->pb, 6, seconds); | ||||
put_bits(&s->pb, 1, !!(s->avctx->flags & CODEC_FLAG_CLOSED_GOP)); | |||||
put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)); | |||||
put_bits(&s->pb, 1, 0); // broken link == NO | put_bits(&s->pb, 1, 0); // broken link == NO | ||||
ff_mpeg4_stuffing(&s->pb); | ff_mpeg4_stuffing(&s->pb); | ||||
@@ -1078,7 +1078,7 @@ static void mpeg4_encode_vol_header(MpegEncContext *s, | |||||
ff_mpeg4_stuffing(&s->pb); | ff_mpeg4_stuffing(&s->pb); | ||||
/* user data */ | /* user data */ | ||||
if (!(s->avctx->flags & CODEC_FLAG_BITEXACT)) { | |||||
if (!(s->avctx->flags & AV_CODEC_FLAG_BITEXACT)) { | |||||
put_bits(&s->pb, 16, 0); | put_bits(&s->pb, 16, 0); | ||||
put_bits(&s->pb, 16, 0x1B2); /* user_data */ | put_bits(&s->pb, 16, 0x1B2); /* user_data */ | ||||
avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0); | avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0); | ||||
@@ -1092,7 +1092,7 @@ void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number) | |||||
int time_div, time_mod; | int time_div, time_mod; | ||||
if (s->pict_type == AV_PICTURE_TYPE_I) { | if (s->pict_type == AV_PICTURE_TYPE_I) { | ||||
if (!(s->avctx->flags & CODEC_FLAG_GLOBAL_HEADER)) { | |||||
if (!(s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) { | |||||
if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy | if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy | ||||
mpeg4_encode_visual_object_header(s); | mpeg4_encode_visual_object_header(s); | ||||
if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0) // HACK, the reference sw is buggy | if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0) // HACK, the reference sw is buggy | ||||
@@ -1325,7 +1325,7 @@ static av_cold int encode_init(AVCodecContext *avctx) | |||||
s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table; | s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table; | ||||
s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table; | s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table; | ||||
if (s->avctx->flags & CODEC_FLAG_GLOBAL_HEADER) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) { | |||||
s->avctx->extradata = av_malloc(1024); | s->avctx->extradata = av_malloc(1024); | ||||
init_put_bits(&s->pb, s->avctx->extradata, 1024); | init_put_bits(&s->pb, s->avctx->extradata, 1024); | ||||
@@ -429,7 +429,7 @@ static av_cold int decode_init(AVCodecContext * avctx) | |||||
s->avctx = avctx; | s->avctx = avctx; | ||||
#if USE_FLOATS | #if USE_FLOATS | ||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
if (!s->fdsp) | if (!s->fdsp) | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
#endif | #endif | ||||
@@ -199,7 +199,7 @@ static int alloc_picture_tables(AVCodecContext *avctx, Picture *pic, int encodin | |||||
} | } | ||||
if (out_format == FMT_H263 || encoding || avctx->debug_mv || | if (out_format == FMT_H263 || encoding || avctx->debug_mv || | ||||
(avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) { | |||||
(avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS)) { | |||||
int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t); | int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t); | ||||
int ref_index_size = 4 * mb_array_size; | int ref_index_size = 4 * mb_array_size; | ||||
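Note: this hunk sizes the motion-vector tables whenever AV_CODEC_FLAG2_EXPORT_MVS is set; on the API side that flag makes decoders attach AV_FRAME_DATA_MOTION_VECTORS side data to output frames. A rough consumer sketch, illustrative only — dec_ctx and frame are assumed to come from an ordinary decode loop:

#include <libavutil/motion_vector.h>

dec_ctx->flags2 |= AV_CODEC_FLAG2_EXPORT_MVS;      /* set before avcodec_open2() */
/* ... decode a packet, receive a finished 'frame' ... */
AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
if (sd) {
    const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
    int i, nb_mvs = sd->size / sizeof(*mvs);
    for (i = 0; i < nb_mvs; i++)
        printf("mv %2d: source %d  (%d,%d) -> (%d,%d)\n", i, mvs[i].source,
               mvs[i].src_x, mvs[i].src_y, mvs[i].dst_x, mvs[i].dst_y);
}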
@@ -297,7 +297,7 @@ static av_cold int dct_init(MpegEncContext *s) | |||||
s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; | s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; | ||||
s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; | s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; | ||||
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; | s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; | ||||
if (s->avctx->flags & CODEC_FLAG_BITEXACT) | |||||
if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT) | |||||
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; | s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; | ||||
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; | s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; | ||||
@@ -731,7 +731,7 @@ static int init_context_frame(MpegEncContext *s) | |||||
} | } | ||||
if (s->codec_id == AV_CODEC_ID_MPEG4 || | if (s->codec_id == AV_CODEC_ID_MPEG4 || | ||||
(s->avctx->flags & CODEC_FLAG_INTERLACED_ME)) { | |||||
(s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) { | |||||
/* interlaced direct mode decoding tables */ | /* interlaced direct mode decoding tables */ | ||||
for (i = 0; i < 2; i++) { | for (i = 0; i < 2; i++) { | ||||
int j, k; | int j, k; | ||||
@@ -1254,7 +1254,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) | |||||
s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; | s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; | ||||
s->current_picture_ptr->f->pict_type = s->pict_type; | s->current_picture_ptr->f->pict_type = s->pict_type; | ||||
// if (s->avctx->flags && CODEC_FLAG_QSCALE) | |||||
// if (s->avctx->flags && AV_CODEC_FLAG_QSCALE) | |||||
// s->current_picture_ptr->quality = s->new_picture_ptr->quality; | // s->current_picture_ptr->quality = s->new_picture_ptr->quality; | ||||
s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; | s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; | ||||
@@ -1568,7 +1568,7 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_ | |||||
int *low_delay, | int *low_delay, | ||||
int mb_width, int mb_height, int mb_stride, int quarter_sample) | int mb_width, int mb_height, int mb_stride, int quarter_sample) | ||||
{ | { | ||||
if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) { | |||||
if ((avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) { | |||||
const int shift = 1 + quarter_sample; | const int shift = 1 + quarter_sample; | ||||
const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1; | const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1; | ||||
const int mv_stride = (mb_width << mv_sample_log2) + | const int mv_stride = (mb_width << mv_sample_log2) + | ||||
@@ -2096,7 +2096,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, | |||||
src_x, src_y << field_based, h_edge_pos, | src_x, src_y << field_based, h_edge_pos, | ||||
v_edge_pos); | v_edge_pos); | ||||
ptr_y = s->sc.edge_emu_buffer; | ptr_y = s->sc.edge_emu_buffer; | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize; | uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize; | ||||
uint8_t *vbuf =ubuf + 9 * s->uvlinesize; | uint8_t *vbuf =ubuf + 9 * s->uvlinesize; | ||||
s->vdsp.emulated_edge_mc(ubuf, ptr_cb, | s->vdsp.emulated_edge_mc(ubuf, ptr_cb, | ||||
@@ -2131,7 +2131,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, | |||||
sy = (sy << 2) >> lowres; | sy = (sy << 2) >> lowres; | ||||
pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy); | pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy); | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h; | int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h; | ||||
uvsx = (uvsx << 2) >> lowres; | uvsx = (uvsx << 2) >> lowres; | ||||
uvsy = (uvsy << 2) >> lowres; | uvsy = (uvsy << 2) >> lowres; | ||||
@@ -2251,7 +2251,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s, | |||||
my += s->mv[dir][i][1]; | my += s->mv[dir][i][1]; | ||||
} | } | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) | |||||
chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, | chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, | ||||
pix_op, mx, my); | pix_op, mx, my); | ||||
break; | break; | ||||
@@ -2491,7 +2491,7 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], | |||||
else if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) | else if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) | ||||
s->mbintra_table[mb_xy]=1; | s->mbintra_table[mb_xy]=1; | ||||
if ((s->avctx->flags & CODEC_FLAG_PSNR) || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor || | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor || | |||||
!(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) && | !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) && | ||||
s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc | s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc | ||||
uint8_t *dest_y, *dest_cb, *dest_cr; | uint8_t *dest_y, *dest_cb, *dest_cr; | ||||
@@ -2594,7 +2594,7 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], | |||||
add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); | add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); | ||||
add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); | add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
if (s->chroma_y_shift){ | if (s->chroma_y_shift){ | ||||
add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); | add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); | ||||
add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); | add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); | ||||
@@ -2613,7 +2613,7 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], | |||||
add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize); | add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize); | ||||
add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize); | add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize); | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
if(s->chroma_y_shift){//Chroma420 | if(s->chroma_y_shift){//Chroma420 | ||||
add_dct(s, block[4], 4, dest_cb, uvlinesize); | add_dct(s, block[4], 4, dest_cb, uvlinesize); | ||||
add_dct(s, block[5], 5, dest_cr, uvlinesize); | add_dct(s, block[5], 5, dest_cr, uvlinesize); | ||||
@@ -2646,7 +2646,7 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], | |||||
put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); | put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); | ||||
put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); | put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
if(s->chroma_y_shift){ | if(s->chroma_y_shift){ | ||||
put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); | put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); | ||||
put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); | put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); | ||||
@@ -2665,7 +2665,7 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], | |||||
s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]); | s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]); | ||||
s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]); | s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]); | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
if(s->chroma_y_shift){ | if(s->chroma_y_shift){ | ||||
s->idsp.idct_put(dest_cb, uvlinesize, block[4]); | s->idsp.idct_put(dest_cb, uvlinesize, block[4]); | ||||
s->idsp.idct_put(dest_cr, uvlinesize, block[5]); | s->idsp.idct_put(dest_cr, uvlinesize, block[5]); | ||||
@@ -2691,7 +2691,7 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], | |||||
skip_idct: | skip_idct: | ||||
if(!readable){ | if(!readable){ | ||||
s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16); | s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16); | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift); | s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift); | ||||
s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift); | s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift); | ||||
} | } | ||||
@@ -339,7 +339,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx) | |||||
s->max_b_frames = avctx->max_b_frames; | s->max_b_frames = avctx->max_b_frames; | ||||
s->codec_id = avctx->codec->id; | s->codec_id = avctx->codec->id; | ||||
s->strict_std_compliance = avctx->strict_std_compliance; | s->strict_std_compliance = avctx->strict_std_compliance; | ||||
s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0; | |||||
s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0; | |||||
s->mpeg_quant = avctx->mpeg_quant; | s->mpeg_quant = avctx->mpeg_quant; | ||||
s->rtp_mode = !!avctx->rtp_payload_size; | s->rtp_mode = !!avctx->rtp_payload_size; | ||||
s->intra_dc_precision = avctx->intra_dc_precision; | s->intra_dc_precision = avctx->intra_dc_precision; | ||||
@@ -377,7 +377,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
#endif | #endif | ||||
/* Fixed QSCALE */ | /* Fixed QSCALE */ | ||||
s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE); | |||||
s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE); | |||||
#if FF_API_MPV_OPT | #if FF_API_MPV_OPT | ||||
FF_DISABLE_DEPRECATION_WARNINGS | FF_DISABLE_DEPRECATION_WARNINGS | ||||
@@ -395,7 +395,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
(s->mpv_flags & FF_MPV_FLAG_QP_RD)) && | (s->mpv_flags & FF_MPV_FLAG_QP_RD)) && | ||||
!s->fixed_qscale; | !s->fixed_qscale; | ||||
s->loop_filter = !!(s->avctx->flags & CODEC_FLAG_LOOP_FILTER); | |||||
s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER); | |||||
if (avctx->rc_max_rate && !avctx->rc_buffer_size) { | if (avctx->rc_max_rate && !avctx->rc_buffer_size) { | ||||
switch(avctx->codec_id) { | switch(avctx->codec_id) { | ||||
@@ -476,7 +476,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
"specified vbv buffer is too large for the given bitrate!\n"); | "specified vbv buffer is too large for the given bitrate!\n"); | ||||
} | } | ||||
if ((s->avctx->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 && | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 && | |||||
s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P && | s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P && | ||||
s->codec_id != AV_CODEC_ID_FLV1) { | s->codec_id != AV_CODEC_ID_FLV1) { | ||||
av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n"); | av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n"); | ||||
@@ -569,7 +569,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
return -1; | return -1; | ||||
} | } | ||||
if ((s->avctx->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) && | |||||
if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) && | |||||
s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { | s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { | ||||
av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n"); | av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n"); | ||||
return -1; | return -1; | ||||
@@ -595,14 +595,14 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
} | } | ||||
if (s->avctx->scenechange_threshold < 1000000000 && | if (s->avctx->scenechange_threshold < 1000000000 && | ||||
(s->avctx->flags & CODEC_FLAG_CLOSED_GOP)) { | |||||
(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) { | |||||
av_log(avctx, AV_LOG_ERROR, | av_log(avctx, AV_LOG_ERROR, | ||||
"closed gop with scene change detection are not supported yet, " | "closed gop with scene change detection are not supported yet, " | ||||
"set threshold to 1000000000\n"); | "set threshold to 1000000000\n"); | ||||
return -1; | return -1; | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_LOW_DELAY) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) { | |||||
if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { | if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { | ||||
av_log(avctx, AV_LOG_ERROR, | av_log(avctx, AV_LOG_ERROR, | ||||
"low delay forcing is only available for mpeg2\n"); | "low delay forcing is only available for mpeg2\n"); | ||||
@@ -652,7 +652,7 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
return -1; | return -1; | ||||
} | } | ||||
if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) { | |||||
if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) { | |||||
av_log(avctx, AV_LOG_INFO, | av_log(avctx, AV_LOG_INFO, | ||||
"notice: b_frame_strategy only affects the first pass\n"); | "notice: b_frame_strategy only affects the first pass\n"); | ||||
avctx->b_frame_strategy = 0; | avctx->b_frame_strategy = 0; | ||||
@@ -708,12 +708,12 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
switch (avctx->codec->id) { | switch (avctx->codec->id) { | ||||
case AV_CODEC_ID_MPEG1VIDEO: | case AV_CODEC_ID_MPEG1VIDEO: | ||||
s->out_format = FMT_MPEG1; | s->out_format = FMT_MPEG1; | ||||
s->low_delay = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY); | |||||
s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY); | |||||
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1); | avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1); | ||||
break; | break; | ||||
case AV_CODEC_ID_MPEG2VIDEO: | case AV_CODEC_ID_MPEG2VIDEO: | ||||
s->out_format = FMT_MPEG1; | s->out_format = FMT_MPEG1; | ||||
s->low_delay = !!(s->avctx->flags & CODEC_FLAG_LOW_DELAY); | |||||
s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY); | |||||
avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1); | avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1); | ||||
s->rtp_mode = 1; | s->rtp_mode = 1; | ||||
break; | break; | ||||
@@ -762,9 +762,9 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
s->out_format = FMT_H263; | s->out_format = FMT_H263; | ||||
s->h263_plus = 1; | s->h263_plus = 1; | ||||
/* Fx */ | /* Fx */ | ||||
s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0; | |||||
s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0; | |||||
s->modified_quant = s->h263_aic; | s->modified_quant = s->h263_aic; | ||||
s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0; | |||||
s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0; | |||||
s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus; | s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus; | ||||
/* /Fx */ | /* /Fx */ | ||||
@@ -846,8 +846,8 @@ FF_ENABLE_DEPRECATION_WARNINGS | |||||
s->encoding = 1; | s->encoding = 1; | ||||
s->progressive_frame = | s->progressive_frame = | ||||
s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT | | |||||
CODEC_FLAG_INTERLACED_ME) || | |||||
s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | | |||||
AV_CODEC_FLAG_INTERLACED_ME) || | |||||
s->alternate_scan); | s->alternate_scan); | ||||
/* init */ | /* init */ | ||||
@@ -1314,8 +1314,8 @@ static int estimate_best_b_count(MpegEncContext *s) | |||||
c->width = s->width >> scale; | c->width = s->width >> scale; | ||||
c->height = s->height >> scale; | c->height = s->height >> scale; | ||||
c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR; | |||||
c->flags |= s->avctx->flags & CODEC_FLAG_QPEL; | |||||
c->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_PSNR; | |||||
c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL; | |||||
c->mb_decision = s->avctx->mb_decision; | c->mb_decision = s->avctx->mb_decision; | ||||
c->me_cmp = s->avctx->me_cmp; | c->me_cmp = s->avctx->me_cmp; | ||||
c->mb_cmp = s->avctx->mb_cmp; | c->mb_cmp = s->avctx->mb_cmp; | ||||
@@ -1439,7 +1439,7 @@ static int select_input_picture(MpegEncContext *s) | |||||
} else { | } else { | ||||
int b_frames; | int b_frames; | ||||
if (s->avctx->flags & CODEC_FLAG_PASS2) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_PASS2) { | |||||
for (i = 0; i < s->max_b_frames + 1; i++) { | for (i = 0; i < s->max_b_frames + 1; i++) { | ||||
int pict_num = s->input_picture[0]->f->display_picture_number + i; | int pict_num = s->input_picture[0]->f->display_picture_number + i; | ||||
@@ -1508,13 +1508,13 @@ static int select_input_picture(MpegEncContext *s) | |||||
s->gop_size > s->picture_in_gop_number) { | s->gop_size > s->picture_in_gop_number) { | ||||
b_frames = s->gop_size - s->picture_in_gop_number - 1; | b_frames = s->gop_size - s->picture_in_gop_number - 1; | ||||
} else { | } else { | ||||
if (s->avctx->flags & CODEC_FLAG_CLOSED_GOP) | |||||
if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) | |||||
b_frames = 0; | b_frames = 0; | ||||
s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I; | s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I; | ||||
} | } | ||||
} | } | ||||
if ((s->avctx->flags & CODEC_FLAG_CLOSED_GOP) && b_frames && | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames && | |||||
s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I) | s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I) | ||||
b_frames--; | b_frames--; | ||||
@@ -1833,7 +1833,7 @@ vbv_retry: | |||||
av_assert0(s->avctx->rc_max_rate); | av_assert0(s->avctx->rc_max_rate); | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_PASS1) | |||||
if (s->avctx->flags & AV_CODEC_FLAG_PASS1) | |||||
ff_write_pass1_stats(s); | ff_write_pass1_stats(s); | ||||
for (i = 0; i < 4; i++) { | for (i = 0; i < 4; i++) { | ||||
@@ -1843,7 +1843,7 @@ vbv_retry: | |||||
avctx->error[i] += s->current_picture_ptr->f->error[i]; | avctx->error[i] += s->current_picture_ptr->f->error[i]; | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_PASS1) | |||||
if (s->avctx->flags & AV_CODEC_FLAG_PASS1) | |||||
assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + | assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + | ||||
avctx->i_tex_bits + avctx->p_tex_bits == | avctx->i_tex_bits + avctx->p_tex_bits == | ||||
put_bits_count(&s->pb)); | put_bits_count(&s->pb)); | ||||
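Note: the AV_CODEC_FLAG_PASS1 branches above feed ff_write_pass1_stats(), which publishes the per-frame rate-control log through avctx->stats_out; a second run with AV_CODEC_FLAG_PASS2 reads it back via stats_in. A hedged caller-side sketch, not from this diff — enc, enc2, logfile and log_buffer are placeholder names:

enc->flags |= AV_CODEC_FLAG_PASS1;           /* first pass: emit stats      */
/* ... after each successful encode call ... */
if (enc->stats_out)
    fputs(enc->stats_out, logfile);          /* append to the two-pass log  */

/* second pass, with the log collected above loaded into memory: */
enc2->flags   |= AV_CODEC_FLAG_PASS2;
enc2->stats_in = log_buffer;                 /* NUL-terminated log contents */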
@@ -2145,7 +2145,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, | |||||
} | } | ||||
if (s->mb_intra) { | if (s->mb_intra) { | ||||
if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) { | |||||
int progressive_score, interlaced_score; | int progressive_score, interlaced_score; | ||||
s->interlaced_dct = 0; | s->interlaced_dct = 0; | ||||
@@ -2176,7 +2176,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, | |||||
s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y); | s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y); | ||||
s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y); | s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y); | ||||
if (s->avctx->flags & CODEC_FLAG_GRAY) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_GRAY) { | |||||
skip_dct[4] = 1; | skip_dct[4] = 1; | ||||
skip_dct[5] = 1; | skip_dct[5] = 1; | ||||
} else { | } else { | ||||
@@ -2224,7 +2224,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, | |||||
op_pix, op_qpix); | op_pix, op_qpix); | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_INTERLACED_DCT) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) { | |||||
int progressive_score, interlaced_score; | int progressive_score, interlaced_score; | ||||
s->interlaced_dct = 0; | s->interlaced_dct = 0; | ||||
@@ -2262,7 +2262,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, | |||||
s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, | s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, | ||||
dest_y + dct_offset + 8, wrap_y); | dest_y + dct_offset + 8, wrap_y); | ||||
if (s->avctx->flags & CODEC_FLAG_GRAY) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_GRAY) { | |||||
skip_dct[4] = 1; | skip_dct[4] = 1; | ||||
skip_dct[5] = 1; | skip_dct[5] = 1; | ||||
} else { | } else { | ||||
@@ -2372,7 +2372,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, | |||||
} | } | ||||
} | } | ||||
if ((s->avctx->flags & CODEC_FLAG_GRAY) && s->mb_intra) { | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) { | |||||
s->block_last_index[4] = | s->block_last_index[4] = | ||||
s->block_last_index[5] = 0; | s->block_last_index[5] = 0; | ||||
s->block[4][0] = | s->block[4][0] = | ||||
@@ -2696,7 +2696,7 @@ static void write_slice_end(MpegEncContext *s){ | |||||
avpriv_align_put_bits(&s->pb); | avpriv_align_put_bits(&s->pb); | ||||
flush_put_bits(&s->pb); | flush_put_bits(&s->pb); | ||||
if ((s->avctx->flags & CODEC_FLAG_PASS1) && !s->partitioned_frame) | |||||
if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame) | |||||
s->misc_bits+= get_bits_diff(s); | s->misc_bits+= get_bits_diff(s); | ||||
} | } | ||||
@@ -2949,7 +2949,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ | |||||
break; | break; | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_PASS1) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_PASS1) { | |||||
int bits= put_bits_count(&s->pb); | int bits= put_bits_count(&s->pb); | ||||
s->misc_bits+= bits - s->last_bits; | s->misc_bits+= bits - s->last_bits; | ||||
s->last_bits= bits; | s->last_bits= bits; | ||||
@@ -3369,7 +3369,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ | |||||
s->p_mv_table[xy][1]=0; | s->p_mv_table[xy][1]=0; | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_PSNR) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_PSNR) { | |||||
int w= 16; | int w= 16; | ||||
int h= 16; | int h= 16; | ||||
@@ -3534,11 +3534,11 @@ static int encode_picture(MpegEncContext *s, int picture_number) | |||||
s->no_rounding ^= 1; | s->no_rounding ^= 1; | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_PASS2) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_PASS2) { | |||||
if (estimate_qp(s,1) < 0) | if (estimate_qp(s,1) < 0) | ||||
return -1; | return -1; | ||||
ff_get_2pass_fcode(s); | ff_get_2pass_fcode(s); | ||||
} else if (!(s->avctx->flags & CODEC_FLAG_QSCALE)) { | |||||
} else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) { | |||||
if(s->pict_type==AV_PICTURE_TYPE_B) | if(s->pict_type==AV_PICTURE_TYPE_B) | ||||
s->lambda= s->last_lambda_for[s->pict_type]; | s->lambda= s->last_lambda_for[s->pict_type]; | ||||
else | else | ||||
@@ -3605,7 +3605,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) | |||||
if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) { | if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) { | ||||
s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); | s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); | ||||
if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) { | |||||
int a,b; | int a,b; | ||||
a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select | a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select | ||||
b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I); | b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I); | ||||
@@ -3614,7 +3614,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) | |||||
ff_fix_long_p_mvs(s); | ff_fix_long_p_mvs(s); | ||||
ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0); | ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0); | ||||
if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) { | |||||
int j; | int j; | ||||
for(i=0; i<2; i++){ | for(i=0; i<2; i++){ | ||||
for(j=0; j<2; j++) | for(j=0; j<2; j++) | ||||
@@ -3639,7 +3639,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) | |||||
ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1); | ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1); | ||||
ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1); | ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1); | ||||
ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1); | ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1); | ||||
if (s->avctx->flags & CODEC_FLAG_INTERLACED_ME) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) { | |||||
int dir, j; | int dir, j; | ||||
for(dir=0; dir<2; dir++){ | for(dir=0; dir<2; dir++){ | ||||
for(i=0; i<2; i++){ | for(i=0; i<2; i++){ | ||||
@@ -3660,7 +3660,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) | |||||
if (s->qscale < 3 && s->max_qcoeff <= 128 && | if (s->qscale < 3 && s->max_qcoeff <= 128 && | ||||
s->pict_type == AV_PICTURE_TYPE_I && | s->pict_type == AV_PICTURE_TYPE_I && | ||||
!(s->avctx->flags & CODEC_FLAG_QSCALE)) | |||||
!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) | |||||
s->qscale= 3; //reduce clipping problems | s->qscale= 3; //reduce clipping problems | ||||
if (s->out_format == FMT_MJPEG) { | if (s->out_format == FMT_MJPEG) { | ||||
@@ -88,7 +88,7 @@ static void gmc1_motion(MpegEncContext *s, | |||||
} | } | ||||
} | } | ||||
if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY) | |||||
if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) | |||||
return; | return; | ||||
motion_x = s->sprite_offset[1][0]; | motion_x = s->sprite_offset[1][0]; | ||||
@@ -165,7 +165,7 @@ static void gmc_motion(MpegEncContext *s, | |||||
a + 1, (1 << (2 * a + 1)) - s->no_rounding, | a + 1, (1 << (2 * a + 1)) - s->no_rounding, | ||||
s->h_edge_pos, s->v_edge_pos); | s->h_edge_pos, s->v_edge_pos); | ||||
if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY) | |||||
if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) | |||||
return; | return; | ||||
ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 + | ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 + | ||||
@@ -324,7 +324,7 @@ void mpeg_motion_internal(MpegEncContext *s, | |||||
src_x, src_y, | src_x, src_y, | ||||
s->h_edge_pos, s->v_edge_pos); | s->h_edge_pos, s->v_edge_pos); | ||||
ptr_y = s->sc.edge_emu_buffer; | ptr_y = s->sc.edge_emu_buffer; | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize; | uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize; | ||||
uint8_t *vbuf = ubuf + 9 * s->uvlinesize; | uint8_t *vbuf = ubuf + 9 * s->uvlinesize; | ||||
uvsrc_y = (unsigned)uvsrc_y << field_based; | uvsrc_y = (unsigned)uvsrc_y << field_based; | ||||
@@ -359,7 +359,7 @@ void mpeg_motion_internal(MpegEncContext *s, | |||||
pix_op[0][dxy](dest_y, ptr_y, linesize, h); | pix_op[0][dxy](dest_y, ptr_y, linesize, h); | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
pix_op[s->chroma_x_shift][uvdxy] | pix_op[s->chroma_x_shift][uvdxy] | ||||
(dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift); | (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift); | ||||
pix_op[s->chroma_x_shift][uvdxy] | pix_op[s->chroma_x_shift][uvdxy] | ||||
@@ -547,7 +547,7 @@ static inline void qpel_motion(MpegEncContext *s, | |||||
src_x, src_y << field_based, | src_x, src_y << field_based, | ||||
s->h_edge_pos, s->v_edge_pos); | s->h_edge_pos, s->v_edge_pos); | ||||
ptr_y = s->sc.edge_emu_buffer; | ptr_y = s->sc.edge_emu_buffer; | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize; | uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize; | ||||
uint8_t *vbuf = ubuf + 9 * s->uvlinesize; | uint8_t *vbuf = ubuf + 9 * s->uvlinesize; | ||||
s->vdsp.emulated_edge_mc(ubuf, ptr_cb, | s->vdsp.emulated_edge_mc(ubuf, ptr_cb, | ||||
@@ -584,7 +584,7 @@ static inline void qpel_motion(MpegEncContext *s, | |||||
qpix_op[1][dxy](dest_y, ptr_y, linesize); | qpix_op[1][dxy](dest_y, ptr_y, linesize); | ||||
qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize); | qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize); | ||||
} | } | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) { | |||||
pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1); | pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1); | ||||
pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1); | pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1); | ||||
} | } | ||||
@@ -740,7 +740,7 @@ static inline void apply_obmc(MpegEncContext *s, | |||||
mx += mv[0][0]; | mx += mv[0][0]; | ||||
my += mv[0][1]; | my += mv[0][1]; | ||||
} | } | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) | |||||
chroma_4mv_motion(s, dest_cb, dest_cr, | chroma_4mv_motion(s, dest_cb, dest_cr, | ||||
ref_picture, pix_op[1], | ref_picture, pix_op[1], | ||||
mx, my); | mx, my); | ||||
@@ -813,7 +813,7 @@ static inline void apply_8x8(MpegEncContext *s, | |||||
} | } | ||||
} | } | ||||
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) | |||||
if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) | |||||
chroma_4mv_motion(s, dest_cb, dest_cr, | chroma_4mv_motion(s, dest_cb, dest_cr, | ||||
ref_picture, pix_op[1], mx, my); | ref_picture, pix_op[1], mx, my); | ||||
} | } | ||||
@@ -298,7 +298,7 @@ static void ff_xvmc_decode_mb(struct MpegEncContext *s) | |||||
cbp++; | cbp++; | ||||
} | } | ||||
if (s->avctx->flags & CODEC_FLAG_GRAY) { | |||||
if (s->avctx->flags & AV_CODEC_FLAG_GRAY) { | |||||
if (s->mb_intra) { // intra frames are always full chroma blocks | if (s->mb_intra) { // intra frames are always full chroma blocks | ||||
for (i = 4; i < blocks_per_mb; i++) { | for (i = 4; i < blocks_per_mb; i++) { | ||||
memset(s->pblocks[i], 0, sizeof(*s->pblocks[i])); // so we need to clear them | memset(s->pblocks[i], 0, sizeof(*s->pblocks[i])); // so we need to clear them | ||||
@@ -121,7 +121,7 @@ static av_cold int decode_init(AVCodecContext * avctx) { | |||||
av_lfg_init(&s->random_state, 0); | av_lfg_init(&s->random_state, 0); | ||||
ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0); | ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0); | ||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
if (!s->fdsp) | if (!s->fdsp) | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
@@ -171,7 +171,7 @@ static av_cold int encode_init(AVCodecContext *avctx) | |||||
s->avctx = avctx; | s->avctx = avctx; | ||||
if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0) | if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0) | ||||
goto error; | goto error; | ||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT); | |||||
s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); | |||||
if (!s->fdsp) { | if (!s->fdsp) { | ||||
ret = AVERROR(ENOMEM); | ret = AVERROR(ENOMEM); | ||||
goto error; | goto error; | ||||
@@ -806,7 +806,7 @@ static av_cold int nvenc_encode_init(AVCodecContext *avctx)
     if (avctx->rc_buffer_size > 0)
         ctx->encode_config.rcParams.vbvBufferSize = avctx->rc_buffer_size;
-    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
+    if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
         ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD;
     } else {
         ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME;
@@ -823,8 +823,8 @@ static av_cold int nvenc_encode_init(AVCodecContext *avctx)
     ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoFullRangeFlag = avctx->color_range == AVCOL_RANGE_JPEG;
-    ctx->encode_config.encodeCodecConfig.h264Config.disableSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
-    ctx->encode_config.encodeCodecConfig.h264Config.repeatSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
+    ctx->encode_config.encodeCodecConfig.h264Config.disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
+    ctx->encode_config.encodeCodecConfig.h264Config.repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
     if (!ctx->profile) {
         switch (avctx->profile) {
@@ -881,8 +881,8 @@ static av_cold int nvenc_encode_init(AVCodecContext *avctx)
         break;
     case AV_CODEC_ID_H265:
-        ctx->encode_config.encodeCodecConfig.hevcConfig.disableSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
-        ctx->encode_config.encodeCodecConfig.hevcConfig.repeatSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
+        ctx->encode_config.encodeCodecConfig.hevcConfig.disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
+        ctx->encode_config.encodeCodecConfig.hevcConfig.repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
         /* No other profile is supported in the current SDK version 5 */
         ctx->encode_config.profileGUID = NV_ENC_HEVC_PROFILE_MAIN_GUID;
@@ -997,7 +997,7 @@ static av_cold int nvenc_encode_init(AVCodecContext *avctx)
         ctx->output_surfaces[surfaceCount].busy = 0;
     }
-    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
         uint32_t outSize = 0;
         char tmpHeader[256];
         NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
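Whether nvenc emits parameter sets in-band or stores them as global extradata is still driven entirely by the caller's flag; only the name changes. A caller-side sketch (hypothetical `enc_ctx` and `codec` variables, not part of this patch):

    enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;   /* ask for SPS/PPS in extradata rather than in every keyframe */
    if (avcodec_open2(enc_ctx, codec, NULL) >= 0 && enc_ctx->extradata_size > 0) {
        /* enc_ctx->extradata now carries the sequence headers for the muxer */
    }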
@@ -1304,7 +1304,7 @@ static int nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     pic_params.outputBitstream = ctx->output_surfaces[i].output_surface;
     pic_params.completionEvent = 0;
-    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
+    if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
         if (frame->top_field_first) {
             pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM;
         } else {
@@ -951,7 +951,7 @@ static av_cold int on2avc_decode_init(AVCodecContext *avctx)
     ff_fft_init(&c->fft256, 7, 0);
     ff_fft_init(&c->fft512, 8, 1);
     ff_fft_init(&c->fft1024, 9, 1);
-    c->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
+    c->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
     if (!c->fdsp)
         return AVERROR(ENOMEM);
@@ -49,11 +49,11 @@ static const AVOption avcodec_options[] = {
 "to minimum/maximum bitrate. Lowering tolerance too much has an adverse effect on quality.",
 OFFSET(bit_rate_tolerance), AV_OPT_TYPE_INT, {.i64 = AV_CODEC_DEFAULT_BITRATE*20 }, 1, INT_MAX, V|E},
 {"flags", NULL, OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, UINT_MAX, V|A|S|E|D, "flags"},
-{"unaligned", "allow decoders to produce unaligned output", 0, AV_OPT_TYPE_CONST, { .i64 = CODEC_FLAG_UNALIGNED }, INT_MIN, INT_MAX, V | D, "flags" },
-{"mv4", "use four motion vectors per macroblock (MPEG-4)", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
-{"qpel", "use 1/4-pel motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
-{"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
-{"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
+{"unaligned", "allow decoders to produce unaligned output", 0, AV_OPT_TYPE_CONST, { .i64 = AV_CODEC_FLAG_UNALIGNED }, INT_MIN, INT_MAX, V | D, "flags" },
+{"mv4", "use four motion vectors per macroblock (MPEG-4)", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qpel", "use 1/4-pel motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
+{"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
+{"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
 #if FF_API_GMC
 {"gmc", "use gmc", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_GMC }, INT_MIN, INT_MAX, V|E, "flags"},
 #endif
@@ -63,33 +63,33 @@ static const AVOption avcodec_options[] = {
 #if FF_API_INPUT_PRESERVED
 {"input_preserved", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_INPUT_PRESERVED }, INT_MIN, INT_MAX, 0, "flags"},
 #endif
-{"pass1", "use internal 2-pass ratecontrol in first pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_PASS1 }, INT_MIN, INT_MAX, 0, "flags"},
-{"pass2", "use internal 2-pass ratecontrol in second pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_PASS2 }, INT_MIN, INT_MAX, 0, "flags"},
-{"gray", "only decode/encode grayscale", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_GRAY }, INT_MIN, INT_MAX, V|E|D, "flags"},
+{"pass1", "use internal 2-pass ratecontrol in first pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS1 }, INT_MIN, INT_MAX, 0, "flags"},
+{"pass2", "use internal 2-pass ratecontrol in second pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS2 }, INT_MIN, INT_MAX, 0, "flags"},
+{"gray", "only decode/encode grayscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GRAY }, INT_MIN, INT_MAX, V|E|D, "flags"},
 #if FF_API_EMU_EDGE
 {"emu_edge", "do not draw edges", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_EMU_EDGE }, INT_MIN, INT_MAX, 0, "flags"},
 #endif
-{"psnr", "error[?] variables will be set during encoding", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_PSNR }, INT_MIN, INT_MAX, V|E, "flags"},
-{"truncated", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_TRUNCATED }, INT_MIN, INT_MAX, 0, "flags"},
+{"psnr", "error[?] variables will be set during encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PSNR }, INT_MIN, INT_MAX, V|E, "flags"},
+{"truncated", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_TRUNCATED }, INT_MIN, INT_MAX, 0, "flags"},
 #if FF_API_NORMALIZE_AQP
 {"naq", "normalize adaptive quantization", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_NORMALIZE_AQP }, INT_MIN, INT_MAX, V|E, "flags"},
 #endif
-{"ildct", "use interlaced DCT", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
-{"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
-{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
-{"bitexact", "use only bitexact functions (except (I)DCT)", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
-{"aic", "H.263 advanced intra coding / MPEG-4 AC prediction", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
-{"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
-{"cgop", "closed GOP", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
-{"output_corrupt", "Output even potentially corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_OUTPUT_CORRUPT }, INT_MIN, INT_MAX, V|D, "flags"},
-{"fast", "allow non-spec-compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
-{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
-{"ignorecrop", "ignore cropping information from sps", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_IGNORE_CROP }, INT_MIN, INT_MAX, V|D, "flags2"},
-{"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
-{"chunks", "Frame data might be split into multiple chunks", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_CHUNKS }, INT_MIN, INT_MAX, V|D, "flags2"},
-{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
-{"export_mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_EXPORT_MVS}, INT_MIN, INT_MAX, V|D, "flags2"},
-{"skip_manual", "do not skip samples and export skip information as frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_SKIP_MANUAL}, INT_MIN, INT_MAX, V|D, "flags2"},
+{"ildct", "use interlaced DCT", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
+{"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
+{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
+{"bitexact", "use only bitexact functions (except (I)DCT)", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
+{"aic", "H.263 advanced intra coding / MPEG-4 AC prediction", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
+{"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
+{"cgop", "closed GOP", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
+{"output_corrupt", "Output even potentially corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_OUTPUT_CORRUPT }, INT_MIN, INT_MAX, V|D, "flags"},
+{"fast", "allow non-spec-compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"ignorecrop", "ignore cropping information from sps", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_IGNORE_CROP }, INT_MIN, INT_MAX, V|D, "flags2"},
+{"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
+{"chunks", "Frame data might be split into multiple chunks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_CHUNKS }, INT_MIN, INT_MAX, V|D, "flags2"},
+{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
+{"export_mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_EXPORT_MVS}, INT_MIN, INT_MAX, V|D, "flags2"},
+{"skip_manual", "do not skip samples and export skip information as frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SKIP_MANUAL}, INT_MIN, INT_MAX, V|D, "flags2"},
 #if FF_API_MOTION_EST
 {"me_method", "set motion estimation method", OFFSET(me_method), AV_OPT_TYPE_INT, {.i64 = ME_EPZS }, INT_MIN, INT_MAX, V|E, "me_method"},
 {"zero", "zero motion estimation (fastest)", 0, AV_OPT_TYPE_CONST, {.i64 = ME_ZERO }, INT_MIN, INT_MAX, V|E, "me_method" },
@@ -2209,7 +2209,7 @@ int ff_celt_init(AVCodecContext *avctx, CeltContext **ps, int output_channels)
         goto fail;
     }
-    s->dsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
+    s->dsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
     if (!s->dsp) {
         ret = AVERROR(ENOMEM);
         goto fail;
@@ -188,8 +188,8 @@ int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx,
                      const uint8_t *buf, int buf_size, int keyframe)
 {
     if (s && s->parser->split) {
-        if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER ||
-            avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER) {
+        if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER ||
+            avctx->flags2 & AV_CODEC_FLAG2_LOCAL_HEADER) {
             int i = s->parser->split(avctx, buf, buf_size);
             buf += i;
             buf_size -= i;
@@ -200,7 +200,7 @@ int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx,
     *poutbuf = (uint8_t *) buf;
     *poutbuf_size = buf_size;
     if (avctx->extradata) {
-        if (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)) {
+        if (keyframe && (avctx->flags2 & AV_CODEC_FLAG2_LOCAL_HEADER)) {
            int size = buf_size + avctx->extradata_size;
            *poutbuf_size = size;
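The behaviour av_parser_change() implements here is also unchanged: with the local-header flag set it prepends the extradata to keyframe packets instead of relying on a global header. From the caller's side this is still requested the same way (sketch, hypothetical `enc_ctx`):

    enc_ctx->flags2 |= AV_CODEC_FLAG2_LOCAL_HEADER;  /* repeat global headers in front of every keyframe */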
@@ -649,7 +649,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
         s->dpm = s->dpi * 10000 / 254;
     }
-    s->is_progressive = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
+    s->is_progressive = !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT);
     switch (avctx->pix_fmt) {
     case AV_PIX_FMT_RGBA64BE:
         s->bit_depth = 16;
@@ -261,7 +261,7 @@ av_cold void ff_idctdsp_init_ppc(IDCTDSPContext *c, AVCodecContext *avctx,
         return;
     if (!high_bit_depth && avctx->lowres == 0) {
-        if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) ||
+        if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) ||
            (avctx->idct_algo == FF_IDCT_ALTIVEC)) {
             c->idct = idct_altivec;
             c->idct_add = idct_add_altivec;
@@ -574,7 +574,7 @@ static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int
     if (ret < 0)
         return ret;
-    if (!(avctx->flags & CODEC_FLAG_GRAY)) {
+    if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
         ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
                                   buf + y_data_size, u_data_size,
                                   qmat_chroma_scaled, log2_chroma_blocks_per_mb);
@@ -323,7 +323,7 @@ static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx,
     *y_data_size = encode_slice_plane(avctx, mb_count, dest_y, luma_stride,
                                       buf, data_size, ctx->qmat_luma[qp - 1], 0);
-    if (!(avctx->flags & CODEC_FLAG_GRAY)) {
+    if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
         *u_data_size = encode_slice_plane(avctx, mb_count, dest_u,
                                           chroma_stride, buf + *y_data_size, data_size - *y_data_size,
                                           ctx->qmat_chroma[qp - 1], 1);
@@ -963,7 +963,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     bytestream_put_be16 (&buf, avctx->height);
     frame_flags = ctx->chroma_factor << 6;
-    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT)
+    if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
         frame_flags |= pic->top_field_first ? 0x04 : 0x08;
     bytestream_put_byte (&buf, frame_flags);
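AV_CODEC_FLAG_GRAY keeps its meaning of skipping the chroma planes on both the decode and encode paths shown above. Caller-side sketch (hypothetical `dec_ctx`):

    dec_ctx->flags |= AV_CODEC_FLAG_GRAY;  /* decode luma only; chroma is not reconstructed */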
@@ -1122,7 +1122,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     int mps;
     int i, j;
     int min_quant, max_quant;
-    int interlaced = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
+    int interlaced = !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT);
     avctx->bits_per_raw_sample = 10;
 #if FF_API_CODED_FRAME
@@ -46,9 +46,9 @@
 static void validate_thread_parameters(AVCodecContext *avctx)
 {
     int frame_threading_supported = (avctx->codec->capabilities & CODEC_CAP_FRAME_THREADS)
-                                && !(avctx->flags & CODEC_FLAG_TRUNCATED)
-                                && !(avctx->flags & CODEC_FLAG_LOW_DELAY)
-                                && !(avctx->flags2 & CODEC_FLAG2_CHUNKS);
+                                && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED)
+                                && !(avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
+                                && !(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS);
     if (avctx->thread_count == 1) {
         avctx->active_thread_type = 0;
     } else if (frame_threading_supported && (avctx->thread_type & FF_THREAD_FRAME)) {
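The validation logic itself does not change: frame threading is still refused whenever truncated input, low delay, or chunked packets are requested. A caller-side sketch of the settings this check inspects (hypothetical `dec_ctx`):

    dec_ctx->thread_count = 0;                              /* let libavcodec choose the thread count */
    dec_ctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
    dec_ctx->flags       |= AV_CODEC_FLAG_LOW_DELAY;        /* this alone disables frame threading in the check above */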
@@ -58,7 +58,7 @@ static int init_video_param(AVCodecContext *avctx, QSVEncContext *q)
     q->param.mfx.TargetUsage = q->preset;
     q->param.mfx.GopPicSize = FFMAX(0, avctx->gop_size);
     q->param.mfx.GopRefDist = FFMAX(-1, avctx->max_b_frames) + 1;
-    q->param.mfx.GopOptFlag = avctx->flags & CODEC_FLAG_CLOSED_GOP ?
+    q->param.mfx.GopOptFlag = avctx->flags & AV_CODEC_FLAG_CLOSED_GOP ?
                               MFX_GOP_CLOSED : 0;
     q->param.mfx.IdrInterval = q->idr_interval;
     q->param.mfx.NumSlice = avctx->slices;
@@ -78,7 +78,7 @@ static int init_video_param(AVCodecContext *avctx, QSVEncContext *q)
     q->param.mfx.FrameInfo.BitDepthChroma = 8;
     q->param.mfx.FrameInfo.Width = FFALIGN(avctx->width, q->width_align);
-    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
+    if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
         /* A true field layout (TFF or BFF) is not important here,
            it will specified later during frame encoding. But it is important
            to specify is frame progressive or not because allowed heigh alignment
@@ -100,7 +100,7 @@ static int init_video_param(AVCodecContext *avctx, QSVEncContext *q)
         q->param.mfx.FrameInfo.FrameRateExtD = avctx->time_base.num;
     }
-    if (avctx->flags & CODEC_FLAG_QSCALE) {
+    if (avctx->flags & AV_CODEC_FLAG_QSCALE) {
         q->param.mfx.RateControlMethod = MFX_RATECONTROL_CQP;
         ratecontrol_desc = "constant quantization parameter (CQP)";
     } else if (avctx->rc_max_rate == avctx->bit_rate) {
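Selecting constant-QP rate control through this path still works the same way from the caller: set the qscale flag and provide a quality value. A minimal sketch (hypothetical `enc_ctx`; FF_QP2LAMBDA converts a quantizer value to the lambda scale that global_quality uses):

    enc_ctx->flags         |= AV_CODEC_FLAG_QSCALE;
    enc_ctx->global_quality = FF_QP2LAMBDA * 23;   /* fixed quantizer of roughly 23 */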
@@ -81,7 +81,7 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
         return AVERROR_PATCHWELCOME;
     }
-    ractx->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
+    ractx->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
     if (!ractx->fdsp)
         return AVERROR(ENOMEM);
@@ -170,7 +170,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
     if (!rcc->buffer_index)
         rcc->buffer_index = s->avctx->rc_buffer_size * 3 / 4;
-    if (s->avctx->flags & CODEC_FLAG_PASS2) {
+    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
         int i;
         char *p;
@@ -238,7 +238,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
     }
     // FIXME maybe move to end
-    if ((s->avctx->flags & CODEC_FLAG_PASS2) && s->rc_strategy == 1) {
+    if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == 1) {
 #if CONFIG_LIBXVID
         return ff_xvid_rate_control_init(s);
 #else
@@ -249,7 +249,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
         }
     }
-    if (!(s->avctx->flags & CODEC_FLAG_PASS2)) {
+    if (!(s->avctx->flags & AV_CODEC_FLAG_PASS2)) {
         rcc->short_term_qsum = 0.001;
         rcc->short_term_qcount = 0.001;
@@ -318,7 +318,7 @@ av_cold void ff_rate_control_uninit(MpegEncContext *s)
     av_freep(&rcc->entry);
 #if CONFIG_LIBXVID
-    if ((s->avctx->flags & CODEC_FLAG_PASS2) && s->rc_strategy == 1)
+    if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == 1)
         ff_xvid_rate_control_uninit(s);
 #endif
 }
@@ -771,7 +771,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
     emms_c();
 #if CONFIG_LIBXVID
-    if ((s->avctx->flags & CODEC_FLAG_PASS2) && s->rc_strategy == 1)
+    if ((s->avctx->flags & AV_CODEC_FLAG_PASS2) && s->rc_strategy == 1)
         return ff_xvid_rate_estimate_qscale(s, dry_run);
 #endif
@@ -790,7 +790,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
                s->frame_bits - s->stuffing_bits);
     }
-    if (s->avctx->flags & CODEC_FLAG_PASS2) {
+    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
         av_assert0(picture_number >= 0);
         if (picture_number >= rcc->num_entries) {
             av_log(s, AV_LOG_ERROR, "Input is longer than 2-pass log file\n");
@@ -824,7 +824,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
     var = pict_type == AV_PICTURE_TYPE_I ? pic->mb_var_sum : pic->mc_mb_var_sum;
     short_term_q = 0; /* avoid warning */
-    if (s->avctx->flags & CODEC_FLAG_PASS2) {
+    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
         if (pict_type != AV_PICTURE_TYPE_I)
             av_assert0(pict_type == rce->new_pict_type);
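All of the rate-control entry points above key off AV_CODEC_FLAG_PASS1/AV_CODEC_FLAG_PASS2, and the two-pass workflow itself is untouched. A minimal caller-side sketch (hypothetical `enc_ctx` and `stats_buf`; log-file handling omitted):

    /* pass 1: collect statistics */
    enc_ctx->flags |= AV_CODEC_FLAG_PASS1;
    /* after each encoded frame, enc_ctx->stats_out holds a line to append to the pass-1 log */

    /* pass 2: feed the statistics back before avcodec_open2() */
    enc_ctx->flags   |= AV_CODEC_FLAG_PASS2;
    enc_ctx->stats_in = stats_buf;  /* NUL-terminated contents of the pass-1 log */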