* commit '7e350379f87e7f74420b4813170fe808e2313911':
  lavfi: switch to AVFrame.

Conflicts:
	doc/filters.texi
	libavfilter/af_ashowinfo.c
	libavfilter/audio.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/buffersink.c
	libavfilter/buffersrc.c
	libavfilter/buffersrc.h
	libavfilter/f_select.c
	libavfilter/f_setpts.c
	libavfilter/fifo.c
	libavfilter/split.c
	libavfilter/src_movie.c
	libavfilter/version.h
	libavfilter/vf_aspect.c
	libavfilter/vf_bbox.c
	libavfilter/vf_blackframe.c
	libavfilter/vf_delogo.c
	libavfilter/vf_drawbox.c
	libavfilter/vf_drawtext.c
	libavfilter/vf_fade.c
	libavfilter/vf_fieldorder.c
	libavfilter/vf_fps.c
	libavfilter/vf_frei0r.c
	libavfilter/vf_gradfun.c
	libavfilter/vf_hqdn3d.c
	libavfilter/vf_lut.c
	libavfilter/vf_overlay.c
	libavfilter/vf_pad.c
	libavfilter/vf_scale.c
	libavfilter/vf_showinfo.c
	libavfilter/vf_transpose.c
	libavfilter/vf_vflip.c
	libavfilter/vf_yadif.c
	libavfilter/video.c
	libavfilter/vsrc_testsrc.c
	libavfilter/yadif.h

Following are notes about the merge authorship and various technical details.

Michael Niedermayer:
  * Main merge operation, notably avfilter.c and video.c
  * Switch to AVFrame:
    - afade
    - anullsrc
    - apad
    - aresample
    - blackframe
    - deshake
    - idet
    - il
    - mandelbrot
    - mptestsrc
    - noise
    - setfield
    - smartblur
    - tinterlace
  * Various merge changes and fixes in:
    - ashowinfo
    - blackdetect
    - field
    - fps
    - select
    - testsrc
    - yadif

Nicolas George:
  * Switch to AVFrame:
    - make rawdec work with refcounted frames; adapted from commit
      759001c534 by Anton Khirnov. Also fixes the use of || instead
      of | in a flags check.
    - make buffer sink and src, audio and video, all work together

Clément Bœsch:
  * Switch to AVFrame:
    - aevalsrc
    - alphaextract
    - blend
    - cellauto
    - colormatrix
    - concat
    - earwax
    - ebur128
    - edgedetect
    - geq
    - histeq
    - histogram
    - hue
    - kerndeint
    - life
    - movie
    - mp (with the help of Michael)
    - overlay
    - pad
    - pan
    - pp
    - removelogo
    - sendcmd
    - showspectrum
    - showwaves
    - silencedetect
    - stereo3d
    - subtitles
    - super2xsai
    - swapuv
    - thumbnail
    - tile

Hendrik Leppkes:
  * Switch to AVFrame:
    - aconvert
    - amerge
    - asetnsamples
    - atempo
    - biquads

Matthieu Bouron:
  * Switch to AVFrame:
    - alphamerge
    - decimate
    - volumedetect

Stefano Sabatini:
  * Switch to AVFrame:
    - astreamsync
    - flite
    - framestep

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Signed-off-by: Nicolas George <nicolas.george@normalesup.org>
Signed-off-by: Clément Bœsch <ubitux@gmail.com>
Signed-off-by: Hendrik Leppkes <h.leppkes@gmail.com>
Signed-off-by: Matthieu Bouron <matthieu.bouron@gmail.com>
Signed-off-by: Stefano Sabatini <stefasab@gmail.com>

Merged-by: Michael Niedermayer <michaelni@gmx.at>
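The whole merge follows one recurring pattern, visible in every hunk below: AVFilterBufferRef becomes AVFrame, ff_get_audio_buffer() loses its permissions argument, avfilter_copy_buffer_ref_props() becomes av_frame_copy_props(), and avfilter_unref_buffer()/avfilter_unref_bufferp() become av_frame_free(). A minimal sketch of the post-merge shape of an audio filter_frame() callback (not a verbatim excerpt; only the lavfi/avutil calls that the diffs migrate to are used):

    static int filter_frame(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];
        AVFrame *out;

        if (av_frame_is_writable(in)) {
            out = in;                          /* process in place */
        } else {
            out = ff_get_audio_buffer(outlink, in->nb_samples);
            if (!out) {
                av_frame_free(&in);            /* was avfilter_unref_buffer() */
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);      /* was avfilter_copy_buffer_ref_props() */
        }

        /* ... process in->extended_data into out->extended_data ... */

        if (out != in)
            av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    }
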
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -2086,9 +2086,6 @@ pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
 @item n
 the number of input frame, starting from 0
-@item pos
-the position in the file of the input frame, NAN if unknown
 @item t
 timestamp expressed in seconds, NAN if the input timestamp is unknown

--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -1627,8 +1627,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
                            (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                            (AVRational){1, ist->st->codec->sample_rate});
     for (i = 0; i < ist->nb_filters; i++)
-        av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
-                               AV_BUFFERSRC_FLAG_PUSH);
+        av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
+        /* TODO re-add AV_BUFFERSRC_FLAG_PUSH */
     decoded_frame->pts = AV_NOPTS_VALUE;
@@ -1737,7 +1737,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
                                         AV_BUFFERSRC_FLAG_NO_COPY |
                                         AV_BUFFERSRC_FLAG_PUSH);
         } else
-        if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
+        if(av_buffersrc_add_frame_flags(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
             av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
             exit(1);
         }

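Both ffmpeg.c hunks above track the buffersrc API reshuffle that comes with this merge: the flag-taking call is renamed av_buffersrc_add_frame_flags(), av_buffersrc_add_frame() becomes the flagless variant, and av_buffersrc_write_frame() leaves ownership of the frame with the caller. A hedged sketch of feeding a decoded frame into an already-configured buffer source ("src_ctx" is an assumed filter instance, not from the diff):

    /* The source keeps its own reference; 'frame' still belongs to us.
     * av_buffersrc_add_frame_flags() is the renamed flag-taking variant
     * and takes ownership of the frame's data instead. */
    if (av_buffersrc_write_frame(src_ctx, frame) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
        exit(1);
    }
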
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -33,7 +33,6 @@ OBJS = allfilters.o \
        avfilter.o \
        avfiltergraph.o \
        buffer.o \
-       buffersink.o \
        buffersrc.o \
        drawutils.o \
        fifo.o \
@@ -41,7 +40,6 @@ OBJS = allfilters.o \
        graphdump.o \
        graphparser.o \
        sink_buffer.o \
-       src_buffer.o \
        transform.o \
        video.o \

--- a/libavfilter/af_aconvert.c
+++ b/libavfilter/af_aconvert.c
@@ -135,23 +135,23 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
 {
     AConvertContext *aconvert = inlink->dst->priv;
-    const int n = insamplesref->audio->nb_samples;
+    const int n = insamplesref->nb_samples;
     AVFilterLink *const outlink = inlink->dst->outputs[0];
-    AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
+    AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n);
     int ret;
 
-    swr_convert(aconvert->swr, outsamplesref->data, n,
-                (void *)insamplesref->data, n);
+    swr_convert(aconvert->swr, outsamplesref->extended_data, n,
+                (void *)insamplesref->extended_data, n);
 
-    avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
-    outsamplesref->audio->channels       = outlink->channels;
-    outsamplesref->audio->channel_layout = outlink->channel_layout;
+    av_frame_copy_props(outsamplesref, insamplesref);
+    outsamplesref->channels       = outlink->channels;
+    outsamplesref->channel_layout = outlink->channel_layout;
 
     ret = ff_filter_frame(outlink, outsamplesref);
-    avfilter_unref_buffer(insamplesref);
+    av_frame_free(&insamplesref);
     return ret;
 }
@@ -160,7 +160,6 @@ static const AVFilterPad aconvert_inputs[] = {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };

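The data -> extended_data change in the swr_convert() call above is the general rule after the switch: with planar audio and more than AV_NUM_DATA_POINTERS (8) channels, AVFrame.data[] holds only the first plane pointers, while extended_data[] always covers every channel plane. A sketch, with swr, in and out assumed to be a configured SwrContext and two audio frames:

    /* extended_data aliases data[] when there are <= 8 planes,
     * so passing it is always safe, whatever the channel count: */
    swr_convert(swr, out->extended_data, out->nb_samples,
                (const uint8_t **)in->extended_data, in->nb_samples);
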
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -232,22 +232,22 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AudioFadeContext *afade = inlink->dst->priv;
     AVFilterLink *outlink   = inlink->dst->outputs[0];
-    int nb_samples          = buf->audio->nb_samples;
-    AVFilterBufferRef *out_buf;
+    int nb_samples          = buf->nb_samples;
+    AVFrame *out_buf;
     int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base);
 
     if ((!afade->type && (afade->start_sample + afade->nb_samples < cur_sample)) ||
         ( afade->type && (cur_sample + afade->nb_samples < afade->start_sample)))
         return ff_filter_frame(outlink, buf);
 
-    if (buf->perms & AV_PERM_WRITE) {
+    if (av_frame_is_writable(buf)) {
         out_buf = buf;
     } else {
-        out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
+        out_buf = ff_get_audio_buffer(inlink, nb_samples);
         if (!out_buf)
             return AVERROR(ENOMEM);
         out_buf->pts = buf->pts;
     }
@@ -256,7 +256,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     if ((!afade->type && (cur_sample + nb_samples < afade->start_sample)) ||
         ( afade->type && (afade->start_sample + afade->nb_samples < cur_sample))) {
         av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
-                               out_buf->audio->channels, out_buf->format);
+                               out_buf->channels, out_buf->format);
     } else {
         int64_t start;
@@ -266,13 +266,13 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
             start = afade->start_sample + afade->nb_samples - cur_sample;
 
         afade->fade_samples(out_buf->extended_data, buf->extended_data,
-                            nb_samples, buf->audio->channels,
+                            nb_samples, buf->channels,
                             afade->type ? -1 : 1, start,
                             afade->nb_samples, afade->curve);
     }
 
     if (buf != out_buf)
-        avfilter_unref_buffer(buf);
+        av_frame_free(&buf);
 
     return ff_filter_frame(outlink, out_buf);
 }

--- a/libavfilter/af_amerge.c
+++ b/libavfilter/af_amerge.c
@@ -219,14 +219,14 @@ static inline void copy_samples(int nb_inputs, struct amerge_input in[],
     }
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     AMergeContext *am = ctx->priv;
     AVFilterLink *const outlink = ctx->outputs[0];
     int input_number;
     int nb_samples, ns, i;
-    AVFilterBufferRef *outbuf, *inbuf[SWR_CH_MAX];
+    AVFrame *outbuf, *inbuf[SWR_CH_MAX];
     uint8_t *ins[SWR_CH_MAX], *outs;
 
     for (input_number = 0; input_number < am->nb_inputs; input_number++)
@@ -235,39 +235,40 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
     av_assert1(input_number < am->nb_inputs);
     if (ff_bufqueue_is_full(&am->in[input_number].queue)) {
         av_log(ctx, AV_LOG_ERROR, "Buffer queue overflow\n");
-        avfilter_unref_buffer(insamples);
+        av_frame_free(&insamples);
         return AVERROR(ENOMEM);
     }
-    ff_bufqueue_add(ctx, &am->in[input_number].queue, insamples);
-    am->in[input_number].nb_samples += insamples->audio->nb_samples;
+    ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples));
+    am->in[input_number].nb_samples += insamples->nb_samples;
+    av_frame_free(&insamples);
     nb_samples = am->in[0].nb_samples;
     for (i = 1; i < am->nb_inputs; i++)
         nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
     if (!nb_samples)
         return 0;
 
-    outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE, nb_samples);
+    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
     outs = outbuf->data[0];
     for (i = 0; i < am->nb_inputs; i++) {
         inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
         ins[i] = inbuf[i]->data[0] +
                  am->in[i].pos * am->in[i].nb_ch * am->bps;
     }
-    avfilter_copy_buffer_ref_props(outbuf, inbuf[0]);
+    av_frame_copy_props(outbuf, inbuf[0]);
     outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                   inbuf[0]->pts +
                   av_rescale_q(am->in[0].pos,
                                (AVRational){ 1, ctx->inputs[0]->sample_rate },
                                ctx->outputs[0]->time_base);
-    outbuf->audio->nb_samples     = nb_samples;
-    outbuf->audio->channel_layout = outlink->channel_layout;
-    outbuf->audio->channels       = outlink->channels;
+    outbuf->nb_samples     = nb_samples;
+    outbuf->channel_layout = outlink->channel_layout;
+    outbuf->channels       = outlink->channels;
 
     while (nb_samples) {
         ns = nb_samples;
         for (i = 0; i < am->nb_inputs; i++)
-            ns = FFMIN(ns, inbuf[i]->audio->nb_samples - am->in[i].pos);
+            ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos);
         /* Unroll the most common sample formats: speed +~350% for the loop,
            +~13% overall (including two common decoders) */
         switch (am->bps) {
@@ -289,9 +290,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
         for (i = 0; i < am->nb_inputs; i++) {
             am->in[i].nb_samples -= ns;
             am->in[i].pos += ns;
-            if (am->in[i].pos == inbuf[i]->audio->nb_samples) {
+            if (am->in[i].pos == inbuf[i]->nb_samples) {
                 am->in[i].pos = 0;
-                avfilter_unref_buffer(inbuf[i]);
+                av_frame_free(&inbuf[i]);
                 ff_bufqueue_get(&am->in[i].queue);
                 inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
                 ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
@@ -322,7 +323,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
             .name         = name,
             .type         = AVMEDIA_TYPE_AUDIO,
             .filter_frame = filter_frame,
-            .min_perms    = AV_PERM_READ | AV_PERM_PRESERVE,
         };
         if (!name)
             return AVERROR(ENOMEM);

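The amerge hunk above no longer stores the input frame itself in its queue; it queues a clone and frees the original. av_frame_clone() allocates a new AVFrame that takes its own references to the same underlying data buffers, so no samples are copied. The idiom, reduced from the hunk (the NULL check on the clone is an assumption; the hunk itself does not show one):

    AVFrame *clone = av_frame_clone(insamples);   /* new ref, same buffers */
    if (!clone) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }
    ff_bufqueue_add(ctx, &am->in[input_number].queue, clone);
    av_frame_free(&insamples);                    /* drops our reference only */
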
--- a/libavfilter/af_amix.c
+++ b/libavfilter/af_amix.c
@@ -270,18 +270,18 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
 {
     AVFilterContext *ctx = outlink->src;
     MixContext      *s = ctx->priv;
-    AVFilterBufferRef *out_buf, *in_buf;
+    AVFrame *out_buf, *in_buf;
     int i;
 
     calculate_scales(s, nb_samples);
 
-    out_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
+    out_buf = ff_get_audio_buffer(outlink, nb_samples);
     if (!out_buf)
         return AVERROR(ENOMEM);
 
-    in_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
+    in_buf = ff_get_audio_buffer(outlink, nb_samples);
     if (!in_buf) {
-        avfilter_unref_buffer(out_buf);
+        av_frame_free(&out_buf);
         return AVERROR(ENOMEM);
     }
@@ -303,7 +303,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
             }
         }
     }
-    avfilter_unref_buffer(in_buf);
+    av_frame_free(&in_buf);
 
     out_buf->pts = s->next_pts;
     if (s->next_pts != AV_NOPTS_VALUE)
@@ -450,7 +450,7 @@ static int request_frame(AVFilterLink *outlink)
     return output_frame(outlink, available_samples);
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext  *ctx = inlink->dst;
     MixContext       *s = ctx->priv;
@@ -469,16 +469,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     if (i == 0) {
         int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                    outlink->time_base);
-        ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
+        ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
         if (ret < 0)
             goto fail;
     }
 
     ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
-                              buf->audio->nb_samples);
+                              buf->nb_samples);
 
 fail:
-    avfilter_unref_buffer(buf);
+    av_frame_free(&buf);
 
     return ret;
 }

--- a/libavfilter/af_apad.c
+++ b/libavfilter/af_apad.c
@@ -77,15 +77,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     APadContext *apad = ctx->priv;
 
     if (apad->whole_len)
-        apad->whole_len -= frame->audio->nb_samples;
+        apad->whole_len -= frame->nb_samples;
 
-    apad->next_pts = frame->pts + av_rescale_q(frame->audio->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
+    apad->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
     return ff_filter_frame(ctx->outputs[0], frame);
 }
@@ -99,7 +99,7 @@ static int request_frame(AVFilterLink *outlink)
     if (ret == AVERROR_EOF) {
         int n_out = apad->packet_size;
-        AVFilterBufferRef *outsamplesref;
+        AVFrame *outsamplesref;
 
         if (apad->whole_len > 0) {
             apad->pad_len = apad->whole_len;
@@ -113,16 +113,16 @@ static int request_frame(AVFilterLink *outlink)
         if(!n_out)
             return AVERROR_EOF;
 
-        outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
+        outsamplesref = ff_get_audio_buffer(outlink, n_out);
         if (!outsamplesref)
             return AVERROR(ENOMEM);
 
-        av_assert0(outsamplesref->audio->sample_rate == outlink->sample_rate);
-        av_assert0(outsamplesref->audio->nb_samples  == n_out);
+        av_assert0(outsamplesref->sample_rate == outlink->sample_rate);
+        av_assert0(outsamplesref->nb_samples  == n_out);
 
         av_samples_set_silence(outsamplesref->extended_data, 0,
                                n_out,
-                               outsamplesref->audio->channels,
+                               outsamplesref->channels,
                                outsamplesref->format);
 
         outsamplesref->pts = apad->next_pts;

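The request_frame() path above is the standard EOF-padding idiom in the new API: allocate a frame of n_out samples, silence it with av_samples_set_silence(), stamp the running pts, and push it downstream. Condensed from the hunk (the running next_pts counter lives in the filter context):

    AVFrame *out = ff_get_audio_buffer(outlink, n_out);
    if (!out)
        return AVERROR(ENOMEM);
    av_samples_set_silence(out->extended_data, 0, n_out,
                           out->channels, out->format);
    out->pts = apad->next_pts;    /* advance next_pts by n_out afterwards */
    return ff_filter_frame(outlink, out);
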
--- a/libavfilter/af_aresample.c
+++ b/libavfilter/af_aresample.c
@@ -174,23 +174,23 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
 {
     AResampleContext *aresample = inlink->dst->priv;
-    const int n_in  = insamplesref->audio->nb_samples;
+    const int n_in  = insamplesref->nb_samples;
     int n_out       = n_in * aresample->ratio * 2 + 256;
     AVFilterLink *const outlink = inlink->dst->outputs[0];
-    AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
+    AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n_out);
     int ret;
 
     if(!outsamplesref)
         return AVERROR(ENOMEM);
 
-    avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
+    av_frame_copy_props(outsamplesref, insamplesref);
     outsamplesref->format                = outlink->format;
-    outsamplesref->audio->channels       = outlink->channels;
-    outsamplesref->audio->channel_layout = outlink->channel_layout;
-    outsamplesref->audio->sample_rate    = outlink->sample_rate;
+    outsamplesref->channels              = outlink->channels;
+    outsamplesref->channel_layout        = outlink->channel_layout;
+    outsamplesref->sample_rate           = outlink->sample_rate;
 
     if(insamplesref->pts != AV_NOPTS_VALUE) {
         int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
@@ -203,16 +203,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
     n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
                         (void *)insamplesref->extended_data, n_in);
     if (n_out <= 0) {
-        avfilter_unref_buffer(outsamplesref);
-        avfilter_unref_buffer(insamplesref);
+        av_frame_free(&outsamplesref);
+        av_frame_free(&insamplesref);
         return 0;
     }
 
-    outsamplesref->audio->nb_samples  = n_out;
+    outsamplesref->nb_samples  = n_out;
 
     ret = ff_filter_frame(outlink, outsamplesref);
     aresample->req_fullfilled= 1;
-    avfilter_unref_buffer(insamplesref);
+    av_frame_free(&insamplesref);
     return ret;
 }
@@ -229,20 +229,20 @@ static int request_frame(AVFilterLink *outlink)
     }while(!aresample->req_fullfilled && ret>=0);
 
     if (ret == AVERROR_EOF) {
-        AVFilterBufferRef *outsamplesref;
+        AVFrame *outsamplesref;
         int n_out = 4096;
 
-        outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
+        outsamplesref = ff_get_audio_buffer(outlink, n_out);
         if (!outsamplesref)
             return AVERROR(ENOMEM);
         n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0);
         if (n_out <= 0) {
-            avfilter_unref_buffer(outsamplesref);
+            av_frame_free(&outsamplesref);
             return (n_out == 0) ? AVERROR_EOF : n_out;
         }
 
-        outsamplesref->audio->sample_rate = outlink->sample_rate;
-        outsamplesref->audio->nb_samples  = n_out;
+        outsamplesref->sample_rate = outlink->sample_rate;
+        outsamplesref->nb_samples  = n_out;
 #if 0
         outsamplesref->pts = aresample->next_pts;
         if(aresample->next_pts != AV_NOPTS_VALUE)
@@ -263,7 +263,6 @@ static const AVFilterPad aresample_inputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_AUDIO,
         .filter_frame  = filter_frame,
-        .min_perms     = AV_PERM_READ,
     },
     { NULL },
 };

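The pts handling glimpsed in the aresample hunk works on a grid fine enough for both rates: the input pts is rescaled to units of 1/(in_rate * out_rate) seconds, swr_next_pts() accounts for the resampler delay on that grid, and dividing by the input rate lands in the output timebase. Sketched from the lines shown above (names follow the diff):

    if (in->pts != AV_NOPTS_VALUE) {
        int64_t inpts  = av_rescale(in->pts,
                                    inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate,
                                    inlink->time_base.den);
        int64_t outpts = swr_next_pts(aresample->swr, inpts); /* delay-compensated */
        out->pts = ROUNDED_DIV(outpts, inlink->sample_rate);
    } else {
        out->pts = AV_NOPTS_VALUE;
    }
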
--- a/libavfilter/af_asetnsamples.c
+++ b/libavfilter/af_asetnsamples.c
@@ -93,7 +93,7 @@ static int config_props_output(AVFilterLink *outlink)
 static int push_samples(AVFilterLink *outlink)
 {
     ASNSContext *asns = outlink->src->priv;
-    AVFilterBufferRef *outsamples = NULL;
+    AVFrame *outsamples = NULL;
     int nb_out_samples, nb_pad_samples;
 
     if (asns->pad) {
@@ -107,7 +107,7 @@ static int push_samples(AVFilterLink *outlink)
     if (!nb_out_samples)
         return 0;
 
-    outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_out_samples);
+    outsamples = ff_get_audio_buffer(outlink, nb_out_samples);
     av_assert0(outsamples);
 
     av_audio_fifo_read(asns->fifo,
@@ -117,9 +117,9 @@ static int push_samples(AVFilterLink *outlink)
         av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples,
                                nb_pad_samples, av_get_channel_layout_nb_channels(outlink->channel_layout),
                                outlink->format);
-    outsamples->audio->nb_samples     = nb_out_samples;
-    outsamples->audio->channel_layout = outlink->channel_layout;
-    outsamples->audio->sample_rate    = outlink->sample_rate;
+    outsamples->nb_samples     = nb_out_samples;
+    outsamples->channel_layout = outlink->channel_layout;
+    outsamples->sample_rate    = outlink->sample_rate;
     outsamples->pts = asns->next_out_pts;
 
     if (asns->next_out_pts != AV_NOPTS_VALUE)
@@ -130,13 +130,13 @@ static int push_samples(AVFilterLink *outlink)
     return nb_out_samples;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     ASNSContext *asns = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
     int ret;
-    int nb_samples = insamples->audio->nb_samples;
+    int nb_samples = insamples->nb_samples;
 
     if (av_audio_fifo_space(asns->fifo) < nb_samples) {
         av_log(ctx, AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples);
@@ -150,7 +150,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
     av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples);
     if (asns->next_out_pts == AV_NOPTS_VALUE)
         asns->next_out_pts = insamples->pts;
-    avfilter_unref_buffer(insamples);
+    av_frame_free(&insamples);
 
     while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples)
         push_samples(outlink);
@@ -177,10 +177,10 @@ static int request_frame(AVFilterLink *outlink)
 static const AVFilterPad asetnsamples_inputs[] = {
     {
-        .name           = "default",
-        .type           = AVMEDIA_TYPE_AUDIO,
-        .filter_frame   = filter_frame,
-        .min_perms      = AV_PERM_READ | AV_PERM_WRITE,
+        .name           = "default",
+        .type           = AVMEDIA_TYPE_AUDIO,
+        .filter_frame   = filter_frame,
+        .needs_writable = 1,
     },
     { NULL }
 };

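The pad change above is how write permissions survive the switch: the AV_PERM_* bitmask on AVFilterPad collapses into a single needs_writable flag, and the framework hands the callback a writable frame, copying first if the incoming reference is shared. Filters that only read their input simply drop min_perms, as most hunks here do; a pad that edits samples in place now looks like:

    static const AVFilterPad inputs[] = {
        {
            .name           = "default",
            .type           = AVMEDIA_TYPE_AUDIO,
            .filter_frame   = filter_frame,
            .needs_writable = 1,  /* replaces .min_perms = AV_PERM_READ | AV_PERM_WRITE */
        },
        { NULL }
    };
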
--- a/libavfilter/af_ashowinfo.c
+++ b/libavfilter/af_ashowinfo.c
@@ -55,16 +55,16 @@ static void uninit(AVFilterContext *ctx)
     av_freep(&s->plane_checksums);
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext *ctx = inlink->dst;
     AShowInfoContext *s  = ctx->priv;
     char chlayout_str[128];
     uint32_t checksum = 0;
-    int channels    = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
+    int channels    = av_get_channel_layout_nb_channels(buf->channel_layout);
     int planar      = av_sample_fmt_is_planar(buf->format);
     int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
-    int data_size   = buf->audio->nb_samples * block_align;
+    int data_size   = buf->nb_samples * block_align;
     int planes      = planar ? channels : 1;
     int i;
     void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums));
@@ -82,7 +82,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     }
 
     av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
-                                 buf->audio->channel_layout);
+                                 buf->channel_layout);
 
     av_log(ctx, AV_LOG_INFO,
            "n:%"PRIu64" pts:%s pts_time:%s pos:%"PRId64" "
@@ -90,9 +90,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
            "checksum:%08X ",
            s->frame,
            av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
-           buf->pos,
-           av_get_sample_fmt_name(buf->format), buf->audio->channels, chlayout_str,
-           buf->audio->sample_rate, buf->audio->nb_samples,
+           av_frame_get_pkt_pos(buf),
+           av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
+           buf->sample_rate, buf->nb_samples,
            checksum);
 
     av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
@@ -110,7 +110,6 @@ static const AVFilterPad inputs[] = {
         .type             = AVMEDIA_TYPE_AUDIO,
         .get_audio_buffer = ff_null_get_audio_buffer,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_READ,
     },
     { NULL },
 };

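The ashowinfo hunk also shows where AVFilterBufferRef fields without a direct AVFrame equivalent went: byte position and channel count are reached through accessor functions rather than struct fields at this point in the API. The two used above:

    int64_t pos      = av_frame_get_pkt_pos(buf);   /* was buf->pos             */
    int     channels = av_frame_get_channels(buf);  /* was buf->audio->channels */
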
--- a/libavfilter/af_astreamsync.c
+++ b/libavfilter/af_astreamsync.c
@@ -48,7 +48,7 @@ typedef struct {
     AVExpr *expr;
     double var_values[VAR_NB];
     struct buf_queue {
-        AVFilterBufferRef *buf[QUEUE_SIZE];
+        AVFrame *buf[QUEUE_SIZE];
         unsigned tail, nb;
         /* buf[tail] is the oldest,
            buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
@@ -111,16 +111,16 @@ static int send_out(AVFilterContext *ctx, int out_id)
 {
     AStreamSyncContext *as = ctx->priv;
     struct buf_queue *queue = &as->queue[out_id];
-    AVFilterBufferRef *buf = queue->buf[queue->tail];
+    AVFrame *buf = queue->buf[queue->tail];
     int ret;
 
     queue->buf[queue->tail] = NULL;
     as->var_values[VAR_B1 + out_id]++;
-    as->var_values[VAR_S1 + out_id] += buf->audio->nb_samples;
+    as->var_values[VAR_S1 + out_id] += buf->nb_samples;
     if (buf->pts != AV_NOPTS_VALUE)
         as->var_values[VAR_T1 + out_id] =
             av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
-    as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
+    as->var_values[VAR_T1 + out_id] += buf->nb_samples /
                                        (double)ctx->inputs[out_id]->sample_rate;
     ret = ff_filter_frame(ctx->outputs[out_id], buf);
     queue->nb--;
@@ -167,7 +167,7 @@ static int request_frame(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     AStreamSyncContext *as = ctx->priv;
@@ -185,12 +185,10 @@ static const AVFilterPad astreamsync_inputs[] = {
         .name         = "in1",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ | AV_PERM_PRESERVE,
     },{
         .name         = "in2",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ | AV_PERM_PRESERVE,
     },
     { NULL }
 };

--- a/libavfilter/af_asyncts.c
+++ b/libavfilter/af_asyncts.c
@@ -152,14 +152,13 @@ static int request_frame(AVFilterLink *link)
         handle_trimming(ctx);
 
         if (nb_samples = get_delay(s)) {
-            AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
-                                                         nb_samples);
+            AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
             if (!buf)
                 return AVERROR(ENOMEM);
             ret = avresample_convert(s->avr, buf->extended_data,
                                      buf->linesize[0], nb_samples, NULL, 0, 0);
             if (ret <= 0) {
-                avfilter_unref_bufferp(&buf);
+                av_frame_free(&buf);
                 return (ret < 0) ? ret : AVERROR_EOF;
             }
@@ -171,20 +170,20 @@ static int request_frame(AVFilterLink *link)
     return ret;
 }
 
-static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
+static int write_to_fifo(ASyncContext *s, AVFrame *buf)
 {
     int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
-                                 buf->linesize[0], buf->audio->nb_samples);
-    avfilter_unref_buffer(buf);
+                                 buf->linesize[0], buf->nb_samples);
+    av_frame_free(&buf);
     return ret;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext  *ctx = inlink->dst;
     ASyncContext       *s = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
-    int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
+    int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout);
     int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
                   av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
     int out_size, ret;
@@ -223,8 +222,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     }
 
     if (out_size > 0) {
-        AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
-                                                         out_size);
+        AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size);
         if (!buf_out) {
             ret = AVERROR(ENOMEM);
             goto fail;
@@ -266,11 +264,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         s->pts = pts - avresample_get_delay(s->avr);
 
     ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
-                             buf->linesize[0], buf->audio->nb_samples);
+                             buf->linesize[0], buf->nb_samples);
 
     s->first_frame = 0;
 fail:
-    avfilter_unref_buffer(buf);
+    av_frame_free(&buf);
 
     return ret;
 }

--- a/libavfilter/af_atempo.c
+++ b/libavfilter/af_atempo.c
@@ -140,7 +140,7 @@ typedef struct {
     // for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
     int request_fulfilled;
 
-    AVFilterBufferRef *dst_buffer;
+    AVFrame *dst_buffer;
     uint8_t *dst;
     uint8_t *dst_end;
     uint64_t nsamples_in;
@@ -177,7 +177,7 @@ static void yae_clear(ATempoContext *atempo)
     atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2);
     atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2);
 
-    avfilter_unref_bufferp(&atempo->dst_buffer);
+    av_frame_free(&atempo->dst_buffer);
     atempo->dst     = NULL;
     atempo->dst_end = NULL;
@@ -1024,8 +1024,8 @@ static void push_samples(ATempoContext *atempo,
                          AVFilterLink *outlink,
                          int n_out)
 {
-    atempo->dst_buffer->audio->sample_rate = outlink->sample_rate;
-    atempo->dst_buffer->audio->nb_samples  = n_out;
+    atempo->dst_buffer->sample_rate = outlink->sample_rate;
+    atempo->dst_buffer->nb_samples  = n_out;
 
     // adjust the PTS:
     atempo->dst_buffer->pts =
@@ -1041,14 +1041,13 @@ static void push_samples(ATempoContext *atempo,
     atempo->nsamples_out += n_out;
 }
 
-static int filter_frame(AVFilterLink *inlink,
-                        AVFilterBufferRef *src_buffer)
+static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
 {
     AVFilterContext  *ctx = inlink->dst;
     ATempoContext *atempo = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
 
-    int n_in = src_buffer->audio->nb_samples;
+    int n_in = src_buffer->nb_samples;
     int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);
 
     const uint8_t *src = src_buffer->data[0];
@@ -1056,10 +1055,8 @@ static int filter_frame(AVFilterLink *inlink,
     while (src < src_end) {
         if (!atempo->dst_buffer) {
-            atempo->dst_buffer = ff_get_audio_buffer(outlink,
-                                                     AV_PERM_WRITE,
-                                                     n_out);
-            avfilter_copy_buffer_ref_props(atempo->dst_buffer, src_buffer);
+            atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
+            av_frame_copy_props(atempo->dst_buffer, src_buffer);
 
             atempo->dst     = atempo->dst_buffer->data[0];
             atempo->dst_end = atempo->dst + n_out * atempo->stride;
@@ -1074,7 +1071,7 @@ static int filter_frame(AVFilterLink *inlink,
     }
 
     atempo->nsamples_in += n_in;
-    avfilter_unref_bufferp(&src_buffer);
+    av_frame_free(&src_buffer);
     return 0;
 }
@@ -1098,9 +1095,7 @@ static int request_frame(AVFilterLink *outlink)
         while (err == AVERROR(EAGAIN)) {
             if (!atempo->dst_buffer) {
-                atempo->dst_buffer = ff_get_audio_buffer(outlink,
-                                                         AV_PERM_WRITE,
-                                                         n_max);
+                atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
 
                 atempo->dst     = atempo->dst_buffer->data[0];
                 atempo->dst_end = atempo->dst + n_max * atempo->stride;
@@ -1116,7 +1111,7 @@ static int request_frame(AVFilterLink *outlink)
             }
         }
 
-        avfilter_unref_bufferp(&atempo->dst_buffer);
+        av_frame_free(&atempo->dst_buffer);
         atempo->dst     = NULL;
         atempo->dst_end = NULL;
@@ -1142,7 +1137,6 @@ static const AVFilterPad atempo_inputs[] = {
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
         .config_props = config_props,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };

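atempo keeps its output frame in the filter context across calls, which makes the free-function change worth spelling out: av_frame_free() covers both old unref variants, takes the address of the pointer, resets it to NULL, and is a no-op on NULL, so a persistent buffer can be freed unconditionally:

    av_frame_free(&atempo->dst_buffer);  /* safe even if already NULL; pointer is reset */
    atempo->dst     = NULL;
    atempo->dst_end = NULL;
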
--- a/libavfilter/af_biquads.c
+++ b/libavfilter/af_biquads.c
@@ -392,24 +392,24 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     BiquadsContext *p     = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
-    AVFilterBufferRef *out_buf;
-    int nb_samples = buf->audio->nb_samples;
+    AVFrame *out_buf;
+    int nb_samples = buf->nb_samples;
     int ch;
 
-    if (buf->perms & AV_PERM_WRITE) {
+    if (av_frame_is_writable(buf)) {
         out_buf = buf;
     } else {
-        out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
+        out_buf = ff_get_audio_buffer(inlink, nb_samples);
         if (!out_buf)
             return AVERROR(ENOMEM);
         out_buf->pts = buf->pts;
     }
 
-    for (ch = 0; ch < buf->audio->channels; ch++)
+    for (ch = 0; ch < buf->channels; ch++)
         p->filter(buf->extended_data[ch],
                   out_buf->extended_data[ch], nb_samples,
                   &p->cache[ch].i1, &p->cache[ch].i2,
@@ -417,7 +417,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
                   p->b0, p->b1, p->b2, p->a1, p->a2);
 
     if (buf != out_buf)
-        avfilter_unref_buffer(buf);
+        av_frame_free(&buf);
 
     return ff_filter_frame(outlink, out_buf);
 }

--- a/libavfilter/af_channelmap.c
+++ b/libavfilter/af_channelmap.c
@@ -312,7 +312,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
     return 0;
 }
 
-static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext  *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -330,7 +330,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
         uint8_t **new_extended_data =
             av_mallocz(nch_out * sizeof(*buf->extended_data));
         if (!new_extended_data) {
-            avfilter_unref_buffer(buf);
+            av_frame_free(&buf);
             return AVERROR(ENOMEM);
         }
         if (buf->extended_data == buf->data) {

--- a/libavfilter/af_channelsplit.c
+++ b/libavfilter/af_channelsplit.c
@@ -105,13 +105,13 @@ static int query_formats(AVFilterContext *ctx)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext *ctx = inlink->dst;
     int i, ret = 0;
 
     for (i = 0; i < ctx->nb_outputs; i++) {
-        AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
+        AVFrame *buf_out = av_frame_clone(buf);
 
         if (!buf_out) {
             ret = AVERROR(ENOMEM);
@@ -119,14 +119,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         }
 
         buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
-        buf_out->audio->channel_layout =
-            av_channel_layout_extract_channel(buf->audio->channel_layout, i);
+        buf_out->channel_layout =
+            av_channel_layout_extract_channel(buf->channel_layout, i);
 
         ret = ff_filter_frame(ctx->outputs[i], buf_out);
         if (ret < 0)
             break;
     }
-    avfilter_unref_buffer(buf);
+    av_frame_free(&buf);
     return ret;
 }

--- a/libavfilter/af_earwax.c
+++ b/libavfilter/af_earwax.c
@@ -109,18 +109,18 @@ static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, in
     return out;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     AVFilterLink *outlink = inlink->dst->outputs[0];
     int16_t *taps, *endin, *in, *out;
-    AVFilterBufferRef *outsamples =
-        ff_get_audio_buffer(inlink, AV_PERM_WRITE,
-                            insamples->audio->nb_samples);
+    AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
     int ret;
 
-    if (!outsamples)
+    if (!outsamples) {
+        av_frame_free(&insamples);
         return AVERROR(ENOMEM);
-    avfilter_copy_buffer_ref_props(outsamples, insamples);
+    }
+    av_frame_copy_props(outsamples, insamples);
 
     taps  = ((EarwaxContext *)inlink->dst->priv)->taps;
     out   = (int16_t *)outsamples->data[0];
@@ -131,14 +131,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
     out   = scalarproduct(taps, taps + NUMTAPS, out);
 
     // process current input
-    endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
+    endin = in + insamples->nb_samples * 2 - NUMTAPS;
     scalarproduct(in, endin, out);
 
     // save part of input for next round
     memcpy(taps, endin, NUMTAPS * sizeof(*taps));
 
     ret = ff_filter_frame(outlink, outsamples);
-    avfilter_unref_buffer(insamples);
+    av_frame_free(&insamples);
     return ret;
 }
@@ -147,7 +147,6 @@ static const AVFilterPad earwax_inputs[] = {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };

@@ -56,24 +56,14 @@ typedef struct JoinContext { | |||||
/** | /** | ||||
* Temporary storage for input frames, until we get one on each input. | * Temporary storage for input frames, until we get one on each input. | ||||
*/ | */ | ||||
AVFilterBufferRef **input_frames; | |||||
AVFrame **input_frames; | |||||
/** | /** | ||||
* Temporary storage for data pointers, for assembling the output buffer. | |||||
* Temporary storage for buffer references, for assembling the output frame. | |||||
*/ | */ | ||||
uint8_t **data; | |||||
AVBufferRef **buffers; | |||||
} JoinContext; | } JoinContext; | ||||
/** | |||||
* To avoid copying the data from input buffers, this filter creates | |||||
* a custom output buffer that stores references to all inputs and | |||||
* unrefs them on free. | |||||
*/ | |||||
typedef struct JoinBufferPriv { | |||||
AVFilterBufferRef **in_buffers; | |||||
int nb_in_buffers; | |||||
} JoinBufferPriv; | |||||
#define OFFSET(x) offsetof(JoinContext, x) | #define OFFSET(x) offsetof(JoinContext, x) | ||||
#define A AV_OPT_FLAG_AUDIO_PARAM | #define A AV_OPT_FLAG_AUDIO_PARAM | ||||
#define F AV_OPT_FLAG_FILTERING_PARAM | #define F AV_OPT_FLAG_FILTERING_PARAM | ||||
@@ -94,7 +84,7 @@ static const AVClass join_class = { | |||||
.version = LIBAVUTIL_VERSION_INT, | .version = LIBAVUTIL_VERSION_INT, | ||||
}; | }; | ||||
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) | |||||
static int filter_frame(AVFilterLink *link, AVFrame *frame) | |||||
{ | { | ||||
AVFilterContext *ctx = link->dst; | AVFilterContext *ctx = link->dst; | ||||
JoinContext *s = ctx->priv; | JoinContext *s = ctx->priv; | ||||
@@ -105,7 +95,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf) | |||||
break; | break; | ||||
av_assert0(i < ctx->nb_inputs); | av_assert0(i < ctx->nb_inputs); | ||||
av_assert0(!s->input_frames[i]); | av_assert0(!s->input_frames[i]); | ||||
s->input_frames[i] = buf; | |||||
s->input_frames[i] = frame; | |||||
return 0; | return 0; | ||||
} | } | ||||
@@ -207,9 +197,9 @@ static int join_init(AVFilterContext *ctx, const char *args) | |||||
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout); | s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout); | ||||
s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels); | s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels); | ||||
s->data = av_mallocz(sizeof(*s->data) * s->nb_channels); | |||||
s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels); | |||||
s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs); | s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs); | ||||
if (!s->channels || !s->data || !s->input_frames) { | |||||
if (!s->channels || !s->buffers|| !s->input_frames) { | |||||
ret = AVERROR(ENOMEM); | ret = AVERROR(ENOMEM); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
@@ -248,11 +238,11 @@ static void join_uninit(AVFilterContext *ctx)
     for (i = 0; i < ctx->nb_inputs; i++) {
         av_freep(&ctx->input_pads[i].name);
-        avfilter_unref_bufferp(&s->input_frames[i]);
+        av_frame_free(&s->input_frames[i]);
     }

     av_freep(&s->channels);
-    av_freep(&s->data);
+    av_freep(&s->buffers);
     av_freep(&s->input_frames);
 }

@@ -394,34 +384,14 @@ fail:
     return ret;
 }

-static void join_free_buffer(AVFilterBuffer *buf)
-{
-    JoinBufferPriv *priv = buf->priv;
-
-    if (priv) {
-        int i;
-
-        for (i = 0; i < priv->nb_in_buffers; i++)
-            avfilter_unref_bufferp(&priv->in_buffers[i]);
-        av_freep(&priv->in_buffers);
-        av_freep(&buf->priv);
-    }
-
-    if (buf->extended_data != buf->data)
-        av_freep(&buf->extended_data);
-    av_freep(&buf);
-}
-
 static int join_request_frame(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
     JoinContext *s       = ctx->priv;
-    AVFilterBufferRef *buf;
-    JoinBufferPriv *priv;
+    AVFrame *frame;
     int linesize   = INT_MAX;
-    int perms      = ~0;
     int nb_samples = 0;
+    int nb_buffers = 0;
     int i, j, ret;

     /* get a frame on each input */
@@ -434,54 +404,95 @@ static int join_request_frame(AVFilterLink *outlink)
         /* request the same number of samples on all inputs */
         if (i == 0) {
-            nb_samples = s->input_frames[0]->audio->nb_samples;
+            nb_samples = s->input_frames[0]->nb_samples;

             for (j = 1; !i && j < ctx->nb_inputs; j++)
                 ctx->inputs[j]->request_samples = nb_samples;
         }
     }

+    /* setup the output frame */
+    frame = av_frame_alloc();
+    if (!frame)
+        return AVERROR(ENOMEM);
+    if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
+        frame->extended_data = av_mallocz(s->nb_channels *
+                                          sizeof(*frame->extended_data));
+        if (!frame->extended_data) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+    /* copy the data pointers */
     for (i = 0; i < s->nb_channels; i++) {
         ChannelMap *ch = &s->channels[i];
-        AVFilterBufferRef *cur_buf = s->input_frames[ch->input];
-
-        s->data[i] = cur_buf->extended_data[ch->in_channel_idx];
-        linesize   = FFMIN(linesize, cur_buf->linesize[0]);
-        perms     &= cur_buf->perms;
-    }
-
-    av_assert0(nb_samples > 0);
-    buf = avfilter_get_audio_buffer_ref_from_arrays(s->data, linesize, perms,
-                                                    nb_samples, outlink->format,
-                                                    outlink->channel_layout);
-    if (!buf)
-        return AVERROR(ENOMEM);
-
-    buf->buf->free = join_free_buffer;
-    buf->pts       = s->input_frames[0]->pts;
-
-    if (!(priv = av_mallocz(sizeof(*priv))))
-        goto fail;
-    if (!(priv->in_buffers = av_mallocz(sizeof(*priv->in_buffers) * ctx->nb_inputs)))
-        goto fail;
-
-    for (i = 0; i < ctx->nb_inputs; i++)
-        priv->in_buffers[i] = s->input_frames[i];
-    priv->nb_in_buffers = ctx->nb_inputs;
-    buf->buf->priv      = priv;
-
-    ret = ff_filter_frame(outlink, buf);
+        AVFrame *cur   = s->input_frames[ch->input];
+        AVBufferRef *buf;
+
+        frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
+        linesize = FFMIN(linesize, cur->linesize[0]);
+
+        /* add the buffer where this plane is stored to the list if it's
+         * not already there */
+        buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
+        if (!buf) {
+            ret = AVERROR(EINVAL);
+            goto fail;
+        }
+        for (j = 0; j < nb_buffers; j++)
+            if (s->buffers[j]->buffer == buf->buffer)
+                break;
+        if (j == i)
+            s->buffers[nb_buffers++] = buf;
+    }
+
+    /* create references to the buffers we copied to output */
+    if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
+        frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
+        frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
+                                         frame->nb_extended_buf);
+        if (!frame->extended_buf) {
+            frame->nb_extended_buf = 0;
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+    for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
+        frame->buf[i] = av_buffer_ref(s->buffers[i]);
+        if (!frame->buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+    for (i = 0; i < frame->nb_extended_buf; i++) {
+        frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
+                                               FF_ARRAY_ELEMS(frame->buf)]);
+        if (!frame->extended_buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+    frame->nb_samples     = nb_samples;
+    frame->channel_layout = outlink->channel_layout;
+    frame->sample_rate    = outlink->sample_rate;
+    frame->pts            = s->input_frames[0]->pts;
+    frame->linesize[0]    = linesize;
+    if (frame->data != frame->extended_data) {
+        memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
+               FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
+    }
+
+    ret = ff_filter_frame(outlink, frame);

     memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);

     return ret;

 fail:
-    avfilter_unref_buffer(buf);
-    if (priv)
-        av_freep(&priv->in_buffers);
-    av_freep(&priv);
-    return AVERROR(ENOMEM);
+    av_frame_free(&frame);
+    return ret;
 }

 static const AVFilterPad avfilter_af_join_outputs[] = {
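Note on the join conversion: with AVFrame there is no custom free callback any more; each output channel simply takes an extra reference on the AVBufferRef that backs its plane, and generic unreferencing frees everything in the right order. A minimal sketch of that idea, under the assumption of a refcounted source frame (helper name hypothetical, error handling abbreviated):

    /* Make dst borrow one plane of src without copying, by referencing
     * the underlying buffer (assumes dst_ch fits in frame->buf[]). */
    static int borrow_plane(AVFrame *dst, int dst_ch, AVFrame *src, int src_ch)
    {
        AVBufferRef *plane = av_frame_get_plane_buffer(src, src_ch);

        if (!plane)
            return AVERROR(EINVAL);
        dst->extended_data[dst_ch] = src->extended_data[src_ch];
        dst->buf[dst_ch] = av_buffer_ref(plane);  /* keeps the data alive */
        return dst->buf[dst_ch] ? 0 : AVERROR(ENOMEM);
    }

One detail worth flagging: the deduplication check "if (j == i)" only holds together with "j == nb_buffers" while every earlier channel contributed a distinct buffer; comparing against nb_buffers directly would arguably be the more defensive form.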
@@ -353,21 +353,21 @@ static int config_props(AVFilterLink *link)
     return 0;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     int ret;
-    int n = insamples->audio->nb_samples;
+    int n = insamples->nb_samples;
     AVFilterLink *const outlink = inlink->dst->outputs[0];
-    AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
+    AVFrame *outsamples = ff_get_audio_buffer(outlink, n);
     PanContext *pan = inlink->dst->priv;

     swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n);
-    avfilter_copy_buffer_ref_props(outsamples, insamples);
-    outsamples->audio->channel_layout = outlink->channel_layout;
-    outsamples->audio->channels       = outlink->channels;
+    av_frame_copy_props(outsamples, insamples);
+    outsamples->channel_layout = outlink->channel_layout;
+    outsamples->channels       = outlink->channels;

     ret = ff_filter_frame(outlink, outsamples);
-    avfilter_unref_buffer(insamples);
+    av_frame_free(&insamples);
     return ret;
 }

@@ -383,7 +383,6 @@ static const AVFilterPad pan_inputs[] = {
         .type         = AVMEDIA_TYPE_AUDIO,
         .config_props = config_props,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };
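The pan conversion shows the generic shape most filters take after the switch: request an output AVFrame, process, copy metadata with av_frame_copy_props(), fix up the fields the filter actually changes, hand the frame on, and free the input reference. As a sketch (the per-filter processing is elided, names hypothetical):

    static int filter_frame_template(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];
        AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples);
        int ret;

        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        /* ... process in -> out ... */
        av_frame_copy_props(out, in);           /* pts, metadata, ... */
        out->channel_layout = outlink->channel_layout;
        ret = ff_filter_frame(outlink, out);    /* gives the reference away */
        av_frame_free(&in);
        return ret;
    }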
@@ -174,7 +174,7 @@ static int request_frame(AVFilterLink *outlink)

     /* flush the lavr delay buffer */
     if (ret == AVERROR_EOF && s->avr) {
-        AVFilterBufferRef *buf;
+        AVFrame *frame;
         int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
                                         outlink->sample_rate,
                                         ctx->inputs[0]->sample_rate,
@@ -183,25 +183,25 @@ static int request_frame(AVFilterLink *outlink)
         if (!nb_samples)
             return ret;

-        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf)
+        frame = ff_get_audio_buffer(outlink, nb_samples);
+        if (!frame)
             return AVERROR(ENOMEM);

-        ret = avresample_convert(s->avr, buf->extended_data,
-                                 buf->linesize[0], nb_samples,
+        ret = avresample_convert(s->avr, frame->extended_data,
+                                 frame->linesize[0], nb_samples,
                                  NULL, 0, 0);
         if (ret <= 0) {
-            avfilter_unref_buffer(buf);
+            av_frame_free(&frame);
             return (ret == 0) ? AVERROR_EOF : ret;
         }

-        buf->pts = s->next_pts;
-        return ff_filter_frame(outlink, buf);
+        frame->pts = s->next_pts;
+        return ff_filter_frame(outlink, frame);
     }
     return ret;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext  *ctx = inlink->dst;
     ResampleContext    *s = ctx->priv;
@@ -209,27 +209,26 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     int ret;

     if (s->avr) {
-        AVFilterBufferRef *buf_out;
+        AVFrame *out;
         int delay, nb_samples;

         /* maximum possible samples lavr can output */
         delay      = avresample_get_delay(s->avr);
-        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
+        nb_samples = av_rescale_rnd(in->nb_samples + delay,
                                     outlink->sample_rate, inlink->sample_rate,
                                     AV_ROUND_UP);

-        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf_out) {
+        out = ff_get_audio_buffer(outlink, nb_samples);
+        if (!out) {
             ret = AVERROR(ENOMEM);
             goto fail;
         }

-        ret = avresample_convert(s->avr, buf_out->extended_data,
-                                 buf_out->linesize[0], nb_samples,
-                                 buf->extended_data, buf->linesize[0],
-                                 buf->audio->nb_samples);
+        ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
+                                 nb_samples, in->extended_data, in->linesize[0],
+                                 in->nb_samples);
         if (ret <= 0) {
-            avfilter_unref_buffer(buf_out);
+            av_frame_free(&out);
             if (ret < 0)
                 goto fail;
         }
@@ -237,36 +236,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         av_assert0(!avresample_available(s->avr));

         if (s->next_pts == AV_NOPTS_VALUE) {
-            if (buf->pts == AV_NOPTS_VALUE) {
+            if (in->pts == AV_NOPTS_VALUE) {
                 av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                        "assuming 0.\n");
                 s->next_pts = 0;
             } else
-                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
+                s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                            outlink->time_base);
         }

         if (ret > 0) {
-            buf_out->audio->nb_samples = ret;
-            if (buf->pts != AV_NOPTS_VALUE) {
-                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
+            out->nb_samples = ret;
+            if (in->pts != AV_NOPTS_VALUE) {
+                out->pts = av_rescale_q(in->pts, inlink->time_base,
                                             outlink->time_base) -
                            av_rescale(delay, outlink->sample_rate,
                                       inlink->sample_rate);
             } else
-                buf_out->pts = s->next_pts;
+                out->pts = s->next_pts;

-            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
+            s->next_pts = out->pts + out->nb_samples;

-            ret = ff_filter_frame(outlink, buf_out);
+            ret = ff_filter_frame(outlink, out);
             s->got_output = 1;
         }

 fail:
-        avfilter_unref_buffer(buf);
+        av_frame_free(&in);
     } else {
-        buf->format = outlink->format;
+        in->format = outlink->format;

-        ret = ff_filter_frame(outlink, buf);
+        ret = ff_filter_frame(outlink, in);
         s->got_output = 1;
     }

@@ -278,7 +277,6 @@ static const AVFilterPad avfilter_af_resample_inputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_AUDIO,
         .filter_frame  = filter_frame,
-        .min_perms     = AV_PERM_READ
     },
     { NULL }
 };
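The timestamp arithmetic survives the switch unchanged: the output pts is the input pts rescaled to the output time base, minus the resampler delay expressed in output samples. With 48 kHz in, 44.1 kHz out and a 16-sample delay, the correction is av_rescale(16, 44100, 48000) = 15 output samples (nearest rounding). The rule, in isolation:

    /* next output pts for a converted frame (lavfi configures audio
     * link time bases as 1/sample_rate) */
    out->pts = av_rescale_q(in->pts, inlink->time_base, outlink->time_base)
             - av_rescale(delay, outlink->sample_rate, inlink->sample_rate);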
@@ -70,20 +70,20 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
     return 0;
 }

-static char *get_metadata_val(AVFilterBufferRef *insamples, const char *key)
+static char *get_metadata_val(AVFrame *insamples, const char *key)
 {
     AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0);
     return e && e->value ? e->value : NULL;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     int i;
     SilenceDetectContext *silence   = inlink->dst->priv;
     const int nb_channels           = av_get_channel_layout_nb_channels(inlink->channel_layout);
     const int srate                 = inlink->sample_rate;
-    const int nb_samples            = insamples->audio->nb_samples * nb_channels;
-    const int64_t nb_samples_notify = srate * silence->duration * nb_channels;
+    const int nb_samples            = insamples->nb_samples * nb_channels;
+    const int64_t nb_samples_notify = srate * silence->duration * nb_channels;

     // scale number of null samples to the new sample rate
     if (silence->last_sample_rate && silence->last_sample_rate != srate)
@@ -226,21 +226,21 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     VolumeContext *vol    = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
-    int nb_samples        = buf->audio->nb_samples;
-    AVFilterBufferRef *out_buf;
+    int nb_samples        = buf->nb_samples;
+    AVFrame *out_buf;

     if (vol->volume == 1.0 || vol->volume_i == 256)
         return ff_filter_frame(outlink, buf);

     /* do volume scaling in-place if input buffer is writable */
-    if (buf->perms & AV_PERM_WRITE) {
+    if (av_frame_is_writable(buf)) {
         out_buf = buf;
     } else {
-        out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
+        out_buf = ff_get_audio_buffer(inlink, nb_samples);
         if (!out_buf)
             return AVERROR(ENOMEM);
         out_buf->pts = buf->pts;
@@ -276,7 +276,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     }

     if (buf != out_buf)
-        avfilter_unref_buffer(buf);
+        av_frame_free(&buf);

     return ff_filter_frame(outlink, out_buf);
 }
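AV_PERM_WRITE checks become writability checks on the frame's buffer references: a frame is writable exactly when every AVBufferRef it holds has a refcount of 1. The volume filter keeps the copy explicit so it can handle properties itself; for the common case, av_frame_make_writable() is the one-call equivalent, as in this sketch (illustrative, not taken from the merge):

    static int process_inplace(AVFilterLink *inlink, AVFrame *frame)
    {
        /* no-op when the refcount is already 1, otherwise replaces the
         * buffers with a private copy of the data */
        if (av_frame_make_writable(frame) < 0) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        /* ... modify frame->extended_data in place ... */
        return ff_filter_frame(inlink->dst->outputs[0], frame);
    }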
@@ -49,12 +49,12 @@ static int query_formats(AVFilterContext *ctx)
     return 0;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *samples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
 {
     AVFilterContext *ctx = inlink->dst;
     VolDetectContext *vd = ctx->priv;
-    int64_t layout  = samples->audio->channel_layout;
-    int nb_samples  = samples->audio->nb_samples;
+    int64_t layout  = samples->channel_layout;
+    int nb_samples  = samples->nb_samples;
     int nb_channels = av_get_channel_layout_nb_channels(layout);
     int nb_planes   = nb_channels;
     int plane, i;
@@ -137,7 +137,6 @@ static const AVFilterPad volumedetect_inputs[] = {
         .type             = AVMEDIA_TYPE_AUDIO,
         .get_audio_buffer = ff_null_get_audio_buffer,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_READ,
     },
     { NULL }
 };
@@ -193,8 +193,8 @@ void avfilter_register_all(void)
      * unconditionally */
     REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer);
     REGISTER_FILTER_UNCONDITIONAL(vsrc_buffer);
-    REGISTER_FILTER_UNCONDITIONAL(asink_abuffer);
-    REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
+//    REGISTER_FILTER_UNCONDITIONAL(asink_abuffer);
+//    REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
     REGISTER_FILTER_UNCONDITIONAL(af_afifo);
     REGISTER_FILTER_UNCONDITIONAL(vf_fifo);
 }
@@ -22,9 +22,9 @@
 #include "avfilter.h"
 #include "internal.h"

-static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
+static int null_filter_frame(AVFilterLink *link, AVFrame *frame)
 {
-    avfilter_unref_bufferp(&samplesref);
+    av_frame_free(&frame);
     return 0;
 }
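The null sink illustrates the ownership rule that every converted filter follows: a filter_frame callback owns the reference it receives and must either pass it on or free it, exactly once. A sketch of the two legal exits (name and condition hypothetical):

    static int my_filter_frame(AVFilterLink *inlink, AVFrame *frame)
    {
        if (frame->nb_samples > 0)          /* keep it: ownership moves on */
            return ff_filter_frame(inlink->dst->outputs[0], frame);
        av_frame_free(&frame);              /* or drop it here */
        return 0;
    }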
@@ -212,14 +212,14 @@ static int query_formats(AVFilterContext *ctx)
 static int request_frame(AVFilterLink *outlink)
 {
     EvalContext *eval = outlink->src->priv;
-    AVFilterBufferRef *samplesref;
+    AVFrame *samplesref;
     int i, j;
     double t = eval->n * (double)1/eval->sample_rate;

     if (eval->duration >= 0 && t >= eval->duration)
         return AVERROR_EOF;

-    samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, eval->nb_samples);
+    samplesref = ff_get_audio_buffer(outlink, eval->nb_samples);

     /* evaluate expression for each single sample and for each channel */
     for (i = 0; i < eval->nb_samples; i++, eval->n++) {
@@ -233,8 +233,7 @@ static int request_frame(AVFilterLink *outlink)
     }

     samplesref->pts = eval->pts;
-    samplesref->pos = -1;
-    samplesref->audio->sample_rate = eval->sample_rate;
+    samplesref->sample_rate = eval->sample_rate;
     eval->pts += eval->nb_samples;

     ff_filter_frame(outlink, samplesref);
@@ -102,17 +102,15 @@ static int config_props(AVFilterLink *outlink)
 static int request_frame(AVFilterLink *outlink)
 {
     ANullContext *null = outlink->src->priv;
-    AVFilterBufferRef *samplesref;
+    AVFrame *samplesref;

-    samplesref =
-        ff_get_audio_buffer(outlink, AV_PERM_WRITE, null->nb_samples);
+    samplesref = ff_get_audio_buffer(outlink, null->nb_samples);
     samplesref->pts = null->pts;
-    samplesref->pos = -1;
-    samplesref->audio->channel_layout = null->channel_layout;
-    samplesref->audio->sample_rate = outlink->sample_rate;
+    samplesref->channel_layout = null->channel_layout;
+    samplesref->sample_rate = outlink->sample_rate;

-    ff_filter_frame(outlink, avfilter_ref_buffer(samplesref, ~0));
-    avfilter_unref_buffer(samplesref);
+    ff_filter_frame(outlink, av_frame_clone(samplesref));
+    av_frame_free(&samplesref);

     null->pts += null->nb_samples;
     return 0;
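avfilter_ref_buffer(ref, perm_mask) maps to av_frame_clone(), which allocates a new AVFrame sharing the same underlying buffers (bumping their refcounts) instead of copying data. Masking out AV_PERM_WRITE has no direct counterpart: writability is now derived from the refcounts themselves. A sketch of the semantics:

    AVFrame *clone = av_frame_clone(master);   /* shares master's buffers */
    if (!clone)
        return AVERROR(ENOMEM);
    /* both references now see the same data; it stays effectively
     * read-only until one of them calls av_frame_make_writable() */
    ff_filter_frame(outlink, clone);           /* clone's reference moves on */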
@@ -245,22 +245,22 @@ static int config_props(AVFilterLink *outlink)
 static int request_frame(AVFilterLink *outlink)
 {
-    AVFilterBufferRef *samplesref;
+    AVFrame *samplesref;
     FliteContext *flite = outlink->src->priv;
     int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples);

     if (!nb_samples)
         return AVERROR_EOF;

-    samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
+    samplesref = ff_get_audio_buffer(outlink, nb_samples);
     if (!samplesref)
         return AVERROR(ENOMEM);

     memcpy(samplesref->data[0], flite->wave_samples,
            nb_samples * flite->wave->num_channels * 2);
     samplesref->pts = flite->pts;
-    samplesref->pos = -1;
-    samplesref->audio->sample_rate = flite->wave->sample_rate;
+    av_frame_set_pkt_pos(samplesref, -1);
+    av_frame_set_sample_rate(samplesref, flite->wave->sample_rate);
     flite->pts += nb_samples;
     flite->wave_samples += nb_samples * flite->wave->num_channels;
     flite->wave_nb_samples -= nb_samples;
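The old pos field stops being a first-class member and is carried as packet metadata on the frame. flite uses the libavutil accessors, while other conversions in this merge assign frame->sample_rate directly; both styles were valid at the time, the accessors merely insulate callers from field layout differences across libavutil versions:

    av_frame_set_pkt_pos(frame, -1);         /* replaces the old ref->pos = -1 */
    av_frame_set_sample_rate(frame, 44100);  /* same field as frame->sample_rate */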
@@ -22,6 +22,7 @@
 #include "libavutil/avassert.h"
 #include "libavutil/channel_layout.h"
 #include "libavutil/common.h"
+#include "libavcodec/avcodec.h"

 #include "audio.h"
 #include "avfilter.h"
@@ -32,69 +33,70 @@ int avfilter_ref_get_channels(AVFilterBufferRef *ref)
     return ref->audio ? ref->audio->channels : 0;
 }

-AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms,
-                                            int nb_samples)
+AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
 {
-    return ff_get_audio_buffer(link->dst->outputs[0], perms, nb_samples);
+    return ff_get_audio_buffer(link->dst->outputs[0], nb_samples);
 }

-AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
-                                               int nb_samples)
+AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
 {
-    AVFilterBufferRef *samplesref = NULL;
-    uint8_t **data;
-    int planar      = av_sample_fmt_is_planar(link->format);
-    int nb_channels = link->channels;
-    int planes      = planar ? nb_channels : 1;
-    int linesize;
-    int full_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE |
-                     AV_PERM_REUSE | AV_PERM_REUSE2 | AV_PERM_ALIGN;
-
-    av_assert1(!(perms & ~(full_perms | AV_PERM_NEG_LINESIZES)));
-
-    if (!(data = av_mallocz(sizeof(*data) * planes)))
+    AVFrame *frame = av_frame_alloc();
+    int channels = link->channels;
+    int buf_size, ret;
+
+    av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout));
+
+    if (!frame)
+        return NULL;
+
+    buf_size = av_samples_get_buffer_size(NULL, channels, nb_samples,
+                                          link->format, 0);
+    if (buf_size < 0)
         goto fail;

-    if (av_samples_alloc(data, &linesize, nb_channels, nb_samples, link->format, 0) < 0)
+    frame->buf[0] = av_buffer_alloc(buf_size);
+    if (!frame->buf[0])
         goto fail;

-    samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels(
-        data, linesize, full_perms, nb_samples, link->format,
-        link->channels, link->channel_layout);
-    if (!samplesref)
+    frame->nb_samples = nb_samples;
+    ret = avcodec_fill_audio_frame(frame, channels, link->format,
+                                   frame->buf[0]->data, buf_size, 0);
+    if (ret < 0)
         goto fail;

-    samplesref->audio->sample_rate = link->sample_rate;
+    av_samples_set_silence(frame->extended_data, 0, nb_samples, channels,
+                           link->format);
+
+    frame->nb_samples     = nb_samples;
+    frame->format         = link->format;
+    frame->channels       = link->channels;
+    frame->channel_layout = link->channel_layout;
+    frame->sample_rate    = link->sample_rate;

-    av_freep(&data);
+    return frame;

 fail:
-    if (data)
-        av_freep(&data[0]);
-    av_freep(&data);
-    return samplesref;
+    av_buffer_unref(&frame->buf[0]);
+    av_frame_free(&frame);
+    return NULL;
 }

-AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
-                                       int nb_samples)
+AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
 {
-    AVFilterBufferRef *ret = NULL;
+    AVFrame *ret = NULL;

     if (link->dstpad->get_audio_buffer)
-        ret = link->dstpad->get_audio_buffer(link, perms, nb_samples);
+        ret = link->dstpad->get_audio_buffer(link, nb_samples);

     if (!ret)
-        ret = ff_default_get_audio_buffer(link, perms, nb_samples);
-
-    if (ret)
-        ret->type = AVMEDIA_TYPE_AUDIO;
+        ret = ff_default_get_audio_buffer(link, nb_samples);

     return ret;
 }
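The new default allocator builds the frame around a single refcounted AVBufferRef and uses avcodec_fill_audio_frame() to spread the plane pointers, which is what the new libavcodec/avcodec.h include above is for. For comparison, a standalone libavutil-only equivalent would look roughly like the sketch below (an assumption for illustration; lavfi's version above additionally zeroes the samples and copies link properties):

    #include <libavutil/frame.h>

    AVFrame *alloc_audio_frame(enum AVSampleFormat fmt, uint64_t layout,
                               int sample_rate, int nb_samples)
    {
        AVFrame *frame = av_frame_alloc();

        if (!frame)
            return NULL;
        frame->format         = fmt;
        frame->channel_layout = layout;
        frame->sample_rate    = sample_rate;
        frame->nb_samples     = nb_samples;
        if (av_frame_get_buffer(frame, 0) < 0)  /* allocates frame->buf[] */
            av_frame_free(&frame);
        return frame;
    }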
+#if FF_API_AVFILTERBUFFER
 AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
-                                                                      int linesize,
-                                                                      int perms,
+                                                                      int linesize, int perms,
                                                                       int nb_samples,
                                                                       enum AVSampleFormat sample_fmt,
                                                                       int channels,
@@ -179,3 +181,4 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
                                              nb_samples, sample_fmt,
                                              channels, channel_layout);
 }
+#endif
@@ -44,25 +44,21 @@ static const enum AVSampleFormat ff_planar_sample_fmts_array[] = {
 };

 /** default handler for get_audio_buffer() for audio inputs */
-AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
-                                               int nb_samples);
+AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples);

 /** get_audio_buffer() handler for filters which simply pass audio along */
-AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms,
-                                            int nb_samples);
+AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples);

 /**
  * Request an audio samples buffer with a specific set of permissions.
  *
  * @param link           the output link to the filter from which the buffer will
  *                       be requested
- * @param perms          the required access permissions
  * @param nb_samples     the number of samples per channel
  * @return               A reference to the samples. This must be unreferenced with
  *                       avfilter_unref_buffer when you are finished with it.
  */
-AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
-                                       int nb_samples);
+AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples);

 /**
  * Send a buffer of audio samples to the next filter.
@@ -27,52 +27,6 @@
 #include "libavutil/avassert.h"
 #include "libavutil/opt.h"

-int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
-{
-    dst->pts    = src->pts;
-    dst->pos    = av_frame_get_pkt_pos(src);
-    dst->format = src->format;
-
-    av_dict_free(&dst->metadata);
-    av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);
-
-    switch (dst->type) {
-    case AVMEDIA_TYPE_VIDEO:
-        dst->video->w                   = src->width;
-        dst->video->h                   = src->height;
-        dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
-        dst->video->interlaced          = src->interlaced_frame;
-        dst->video->top_field_first     = src->top_field_first;
-        dst->video->key_frame           = src->key_frame;
-        dst->video->pict_type           = src->pict_type;
-        av_freep(&dst->video->qp_table);
-        dst->video->qp_table_linesize = 0;
-        if (src->qscale_table) {
-            int qsize = src->qstride ? src->qstride * ((src->height + 15) / 16) : (src->width + 15) / 16;
-            dst->video->qp_table = av_malloc(qsize);
-            if (!dst->video->qp_table)
-                return AVERROR(ENOMEM);
-            dst->video->qp_table_linesize = src->qstride;
-            dst->video->qp_table_size     = qsize;
-            memcpy(dst->video->qp_table, src->qscale_table, qsize);
-        }
-        break;
-    case AVMEDIA_TYPE_AUDIO:
-        dst->audio->sample_rate    = src->sample_rate;
-        dst->audio->channel_layout = src->channel_layout;
-        dst->audio->channels       = src->channels;
-        if (src->channels < av_get_channel_layout_nb_channels(src->channel_layout)) {
-            av_log(NULL, AV_LOG_ERROR, "libavfilter does not support this channel layout\n");
-            return AVERROR(EINVAL);
-        }
-        break;
-    default:
-        return AVERROR(EINVAL);
-    }
-
-    return 0;
-}
-
 AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame,
                                                             int perms)
 {
@@ -31,22 +31,6 @@
 #include "libavcodec/avcodec.h" // AVFrame
 #include "avfilter.h"

-/**
- * Copy the frame properties of src to dst, without copying the actual
- * image data.
- *
- * @return 0 on success, a negative number on error.
- */
-int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
-
-/**
- * Copy the frame properties and data pointers of src to dst, without copying
- * the actual data.
- *
- * @return 0 on success, a negative number on error.
- */
-int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
-
 /**
  * Create and return a picref reference from the data and properties
  * contained in frame.
@@ -116,16 +100,4 @@ int avfilter_fill_frame_from_buffer_ref(AVFrame *frame,
                                         const AVFilterBufferRef *ref);
 #endif

-/**
- * Add frame data to buffer_src.
- *
- * @param buffer_src  pointer to a buffer source context
- * @param frame       a frame, or NULL to mark EOF
- * @param flags       a combination of AV_BUFFERSRC_FLAG_*
- * @return            >= 0 in case of success, a negative AVERROR code
- *                    in case of failure
- */
-int av_buffersrc_add_frame(AVFilterContext *buffer_src,
-                           const AVFrame *frame, int flags);
-
 #endif /* AVFILTER_AVCODEC_H */
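The av_buffersrc_add_frame() declaration leaves this header because, with lavfi consuming AVFrames natively, feeding a graph no longer needs the lavfi/lavc glue; after the merge the function lives in buffersrc.h and takes the frame's references over directly. Typical use afterwards looks roughly like this sketch (helper name hypothetical, error handling abbreviated):

    #include <libavfilter/buffersrc.h>
    #include <libavfilter/buffersink.h>

    /* push one decoded frame in, pull all ready frames out */
    int run_graph_once(AVFilterContext *src, AVFilterContext *sink,
                       AVFrame *in, AVFrame *out)
    {
        int ret = av_buffersrc_add_frame(src, in);  /* NULL in means EOF */
        if (ret < 0)
            return ret;
        while ((ret = av_buffersink_get_frame(sink, out)) >= 0) {
            /* ... use out ... */
            av_frame_unref(out);
        }
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }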
@@ -157,7 +157,7 @@ static int config_output(AVFilterLink *outlink)
 }

 static void push_frame(AVFilterContext *ctx, unsigned in_no,
-                       AVFilterBufferRef *buf)
+                       AVFrame *buf)
 {
     ConcatContext *cat = ctx->priv;
     unsigned out_no = in_no % ctx->nb_outputs;
@@ -171,7 +171,7 @@ static void push_frame(AVFilterContext *ctx, unsigned in_no,
     /* add duration to input PTS */
     if (inlink->sample_rate)
         /* use number of audio samples */
-        in->pts += av_rescale_q(buf->audio->nb_samples,
+        in->pts += av_rescale_q(buf->nb_samples,
                                 (AVRational){ 1, inlink->sample_rate },
                                 outlink->time_base);
     else if (in->nb_frames >= 2)
@@ -182,7 +182,7 @@ static void push_frame(AVFilterContext *ctx, unsigned in_no,
     ff_filter_frame(outlink, buf);
 }

-static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static void process_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext *ctx = inlink->dst;
     ConcatContext *cat   = ctx->priv;
@@ -191,7 +191,7 @@ static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     if (in_no < cat->cur_idx) {
         av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
                ctx->input_pads[in_no].name);
-        avfilter_unref_buffer(buf);
+        av_frame_free(&buf);
     } else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
         ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
     } else {
@@ -199,27 +199,25 @@ static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     }
 }

-static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms,
-                                           int w, int h)
+static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
 {
     AVFilterContext *ctx = inlink->dst;
     unsigned in_no = FF_INLINK_IDX(inlink);
     AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

-    return ff_get_video_buffer(outlink, perms, w, h);
+    return ff_get_video_buffer(outlink, w, h);
 }

-static AVFilterBufferRef *get_audio_buffer(AVFilterLink *inlink, int perms,
-                                           int nb_samples)
+static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
 {
     AVFilterContext *ctx = inlink->dst;
     unsigned in_no = FF_INLINK_IDX(inlink);
     AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];

-    return ff_get_audio_buffer(outlink, perms, nb_samples);
+    return ff_get_audio_buffer(outlink, nb_samples);
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     process_frame(inlink, buf);
     return 0; /* enhancement: handle error return */
@@ -256,7 +254,7 @@ static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no)
     int64_t nb_samples, sent = 0;
     int frame_nb_samples;
     AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
-    AVFilterBufferRef *buf;
+    AVFrame *buf;
     int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);

     if (!rate_tb.den)
@@ -266,7 +264,7 @@ static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no)
     frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
     while (nb_samples) {
         frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
-        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, frame_nb_samples);
+        buf = ff_get_audio_buffer(outlink, frame_nb_samples);
         if (!buf)
             return;
         av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
@@ -360,7 +358,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
         for (str = 0; str < cat->nb_streams[type]; str++) {
             AVFilterPad pad = {
                 .type             = type,
-                .min_perms        = AV_PERM_READ | AV_PERM_PRESERVE,
                 .get_video_buffer = get_video_buffer,
                 .get_audio_buffer = get_audio_buffer,
                 .filter_frame     = filter_frame,
@@ -41,7 +41,7 @@ enum ColorMode   { CHANNEL, INTENSITY, NB_CLMODES };
 typedef struct {
     const AVClass *class;
     int w, h;
-    AVFilterBufferRef *outpicref;
+    AVFrame *outpicref;
     int req_fullfilled;
     int nb_display_channels;
     int channel_height;
@@ -122,7 +122,7 @@ static av_cold void uninit(AVFilterContext *ctx)
         av_freep(&showspectrum->rdft_data[i]);
     av_freep(&showspectrum->rdft_data);
     av_freep(&showspectrum->window_func_lut);
-    avfilter_unref_bufferp(&showspectrum->outpicref);
+    av_frame_free(&showspectrum->outpicref);
 }

 static int query_formats(AVFilterContext *ctx)
@@ -179,7 +179,7 @@ static int config_output(AVFilterLink *outlink)
     /* (re-)configuration if the video output changed (or first init) */
     if (rdft_bits != showspectrum->rdft_bits) {
         size_t rdft_size, rdft_listsize;
-        AVFilterBufferRef *outpicref;
+        AVFrame *outpicref;

         av_rdft_end(showspectrum->rdft);
         showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C);
@@ -219,10 +219,9 @@ static int config_output(AVFilterLink *outlink)
             showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));

         /* prepare the initial picref buffer (black frame) */
-        avfilter_unref_bufferp(&showspectrum->outpicref);
+        av_frame_free(&showspectrum->outpicref);
         showspectrum->outpicref = outpicref =
-            ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2,
-                                outlink->w, outlink->h);
+            ff_get_video_buffer(outlink, outlink->w, outlink->h);
         if (!outpicref)
             return AVERROR(ENOMEM);
         outlink->sample_aspect_ratio = (AVRational){1,1};
@@ -253,7 +252,7 @@ inline static void push_frame(AVFilterLink *outlink)
     showspectrum->filled = 0;
     showspectrum->req_fullfilled = 1;

-    ff_filter_frame(outlink, avfilter_ref_buffer(showspectrum->outpicref, ~AV_PERM_WRITE));
+    ff_filter_frame(outlink, av_frame_clone(showspectrum->outpicref));
 }

 static int request_frame(AVFilterLink *outlink)
@@ -272,12 +271,12 @@ static int request_frame(AVFilterLink *outlink)
     return ret;
 }

-static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insamples, int nb_samples)
+static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb_samples)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
     ShowSpectrumContext *showspectrum = ctx->priv;
-    AVFilterBufferRef *outpicref = showspectrum->outpicref;
+    AVFrame *outpicref = showspectrum->outpicref;

     /* nb_freq contains the power of two superior or equal to the output image
      * height (or half the RDFT window size) */
@@ -462,11 +461,11 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insampl
     return add_samples;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     ShowSpectrumContext *showspectrum = ctx->priv;
-    int left_samples = insamples->audio->nb_samples;
+    int left_samples = insamples->nb_samples;

     showspectrum->consumed = 0;
     while (left_samples) {
@@ -475,7 +474,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
         left_samples -= added_samples;
     }

-    avfilter_unref_buffer(insamples);
+    av_frame_free(&insamples);
     return 0;
 }

@@ -484,7 +483,6 @@ static const AVFilterPad showspectrum_inputs[] = {
        .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };
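showspectrum keeps one persistent outpicref that it draws into repeatedly and pushes av_frame_clone() of it downstream on every emission. Under refcounting, the old trick of masking out AV_PERM_WRITE on the reference disappears: if a downstream filter still holds a clone, the shared buffer is simply no longer writable, so a filter using this pattern is expected to re-assert writability before drawing again. A sketch of the defensive step (an assumption about the intended usage, not code from this merge):

    /* before drawing into the persistent frame again */
    if (av_frame_make_writable(s->outpicref) < 0)
        return AVERROR(ENOMEM);  /* copies the pixels if a clone is alive */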
@@ -44,7 +44,7 @@ typedef struct {
     char *rate_str;
     AVRational rate;
     int buf_idx;
-    AVFilterBufferRef *outpicref;
+    AVFrame *outpicref;
     int req_fullfilled;
     int n;
     int sample_count_mod;
@@ -89,7 +89,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     ShowWavesContext *showwaves = ctx->priv;

     av_freep(&showwaves->rate_str);
-    avfilter_unref_bufferp(&showwaves->outpicref);
+    av_frame_free(&showwaves->outpicref);
 }

 static int query_formats(AVFilterContext *ctx)
@@ -190,16 +190,16 @@ static int request_frame(AVFilterLink *outlink)

 #define MAX_INT16 ((1<<15) -1)

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
     ShowWavesContext *showwaves = ctx->priv;
-    const int nb_samples = insamples->audio->nb_samples;
-    AVFilterBufferRef *outpicref = showwaves->outpicref;
+    const int nb_samples = insamples->nb_samples;
+    AVFrame *outpicref = showwaves->outpicref;
     int linesize = outpicref ? outpicref->linesize[0] : 0;
     int16_t *p = (int16_t *)insamples->data[0];
-    int nb_channels = av_get_channel_layout_nb_channels(insamples->audio->channel_layout);
+    int nb_channels = av_get_channel_layout_nb_channels(insamples->channel_layout);
     int i, j, k, h, ret = 0;
     const int n = showwaves->n;
     const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
@@ -208,12 +208,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
     for (i = 0; i < nb_samples; i++) {
         if (!showwaves->outpicref) {
             showwaves->outpicref = outpicref =
-                ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN,
-                                    outlink->w, outlink->h);
+                ff_get_video_buffer(outlink, outlink->w, outlink->h);
             if (!outpicref)
                 return AVERROR(ENOMEM);
-            outpicref->video->w = outlink->w;
-            outpicref->video->h = outlink->h;
+            outpicref->width  = outlink->w;
+            outpicref->height = outlink->h;
             outpicref->pts = insamples->pts +
                              av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
                                           (AVRational){ 1, inlink->sample_rate },
@@ -251,7 +250,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
         outpicref = showwaves->outpicref;
     }

-    avfilter_unref_buffer(insamples);
+    av_frame_free(&insamples);
     return ret;
 }

@@ -260,7 +259,6 @@ static const AVFilterPad showwaves_inputs[] = {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };
@@ -34,43 +34,31 @@
 #include "internal.h"
 #include "audio.h"

-static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame);
+static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);

-char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms)
-{
-    snprintf(buf, buf_size, "%s%s%s%s%s%s",
-             perms & AV_PERM_READ          ? "r" : "",
-             perms & AV_PERM_WRITE         ? "w" : "",
-             perms & AV_PERM_PRESERVE      ? "p" : "",
-             perms & AV_PERM_REUSE         ? "u" : "",
-             perms & AV_PERM_REUSE2        ? "U" : "",
-             perms & AV_PERM_NEG_LINESIZES ? "n" : "");
-    return buf;
-}
-
-void ff_tlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
 {
     av_unused char buf[16];
     ff_tlog(ctx,
-            "ref[%p buf:%p refcount:%d perms:%s data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
-            ref, ref->buf, ref->buf->refcount, ff_get_ref_perms_string(buf, sizeof(buf), ref->perms), ref->data[0],
+            "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
+            ref, ref->buf, ref->data[0],
             ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
-            ref->pts, ref->pos);
+            ref->pts, av_frame_get_pkt_pos(ref));

-    if (ref->video) {
+    if (ref->width) {
         ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
-                ref->video->sample_aspect_ratio.num, ref->video->sample_aspect_ratio.den,
-                ref->video->w, ref->video->h,
-                !ref->video->interlaced     ? 'P' :         /* Progressive  */
-                ref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
-                ref->video->key_frame,
-                av_get_picture_type_char(ref->video->pict_type));
+                ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
+                ref->width, ref->height,
+                !ref->interlaced_frame ? 'P' :         /* Progressive  */
+                ref->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
+                ref->key_frame,
+                av_get_picture_type_char(ref->pict_type));
     }
-    if (ref->audio) {
+    if (ref->nb_samples) {
         ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
-                ref->audio->channel_layout,
-                ref->audio->nb_samples,
-                ref->audio->sample_rate);
+                ref->channel_layout,
+                ref->nb_samples,
+                ref->sample_rate);
     }

     ff_tlog(ctx, "]%s", end ? "\n" : "");
@@ -158,10 +146,7 @@ void avfilter_link_free(AVFilterLink **link)
     if (!*link)
         return;

-    if ((*link)->pool)
-        ff_free_pool((*link)->pool);
-
-    avfilter_unref_bufferp(&(*link)->partial_buf);
+    av_frame_free(&(*link)->partial_buf);

     av_freep(link);
 }

@@ -342,7 +327,7 @@ int ff_request_frame(AVFilterLink *link)
     else if (link->src->inputs[0])
         ret = ff_request_frame(link->src->inputs[0]);
     if (ret == AVERROR_EOF && link->partial_buf) {
-        AVFilterBufferRef *pbuf = link->partial_buf;
+        AVFrame *pbuf = link->partial_buf;
         link->partial_buf = NULL;
         ff_filter_frame_framed(link, pbuf);
         return 0;
@@ -633,76 +618,64 @@ enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx)
     return pads[pad_idx].type;
 }

-static int default_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
+static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     return ff_filter_frame(link->dst->outputs[0], frame);
 }

-static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame)
+static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
 {
-    int (*filter_frame)(AVFilterLink *, AVFilterBufferRef *);
+    int (*filter_frame)(AVFilterLink *, AVFrame *);
     AVFilterPad *src = link->srcpad;
     AVFilterPad *dst = link->dstpad;
-    AVFilterBufferRef *out;
-    int perms, ret;
+    AVFrame *out;
+    int ret;
     AVFilterCommand *cmd= link->dst->command_queue;
     int64_t pts;

     if (link->closed) {
-        avfilter_unref_buffer(frame);
+        av_frame_free(&frame);
         return AVERROR_EOF;
     }

     if (!(filter_frame = dst->filter_frame))
         filter_frame = default_filter_frame;

-    av_assert1((frame->perms & src->min_perms) == src->min_perms);
-    frame->perms &= ~ src->rej_perms;
-    perms = frame->perms;
-
-    if (frame->linesize[0] < 0)
-        perms |= AV_PERM_NEG_LINESIZES;
-
-    /* prepare to copy the frame if the buffer has insufficient permissions */
-    if ((dst->min_perms & perms) != dst->min_perms ||
-        dst->rej_perms & perms) {
-        av_log(link->dst, AV_LOG_DEBUG,
-               "Copying data in avfilter (have perms %x, need %x, reject %x)\n",
-               perms, link->dstpad->min_perms, link->dstpad->rej_perms);
+    /* copy the frame if needed */
+    if (dst->needs_writable && !av_frame_is_writable(frame)) {
+        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

         /* Maybe use ff_copy_buffer_ref instead? */
         switch (link->type) {
         case AVMEDIA_TYPE_VIDEO:
-            out = ff_get_video_buffer(link, dst->min_perms,
-                                      link->w, link->h);
+            out = ff_get_video_buffer(link, link->w, link->h);
             break;
         case AVMEDIA_TYPE_AUDIO:
-            out = ff_get_audio_buffer(link, dst->min_perms,
-                                      frame->audio->nb_samples);
+            out = ff_get_audio_buffer(link, frame->nb_samples);
             break;
         default: return AVERROR(EINVAL);
         }
         if (!out) {
-            avfilter_unref_buffer(frame);
+            av_frame_free(&frame);
             return AVERROR(ENOMEM);
         }
-        avfilter_copy_buffer_ref_props(out, frame);
+        av_frame_copy_props(out, frame);

         switch (link->type) {
         case AVMEDIA_TYPE_VIDEO:
             av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
-                          frame->format, frame->video->w, frame->video->h);
+                          frame->format, frame->width, frame->height);
             break;
         case AVMEDIA_TYPE_AUDIO:
             av_samples_copy(out->extended_data, frame->extended_data,
-                            0, 0, frame->audio->nb_samples,
-                            av_get_channel_layout_nb_channels(frame->audio->channel_layout),
+                            0, 0, frame->nb_samples,
+                            av_get_channel_layout_nb_channels(frame->channel_layout),
                             frame->format);
             break;
         default: return AVERROR(EINVAL);
         }

-        avfilter_unref_buffer(frame);
+        av_frame_free(&frame);
     } else
         out = frame;
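This hunk is the heart of the merge: the whole permission lattice (min_perms/rej_perms with the r/w/p/u/U/n flags that ff_get_ref_perms_string used to print) collapses into a single boolean per pad. A destination pad declares needs_writable, and the framework copies only when the incoming frame is genuinely shared; the decision reduces to:

    /* copy-on-demand decision after the switch (conceptual restatement) */
    int must_copy = dst->needs_writable && !av_frame_is_writable(frame);

av_frame_is_writable() returns nonzero only when every buffer reference the frame holds (frame->buf[] and frame->extended_buf[]) has a refcount of 1, so a frame that no one else can see is passed through untouched.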
@@ -721,48 +694,47 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFilterBufferRef *frame)
     return ret;
 }

-static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFilterBufferRef *frame)
+static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame)
 {
-    int insamples = frame->audio->nb_samples, inpos = 0, nb_samples;
-    AVFilterBufferRef *pbuf = link->partial_buf;
-    int nb_channels = frame->audio->channels;
+    int insamples = frame->nb_samples, inpos = 0, nb_samples;
+    AVFrame *pbuf = link->partial_buf;
+    int nb_channels = frame->channels;
     int ret = 0;

     /* Handle framing (min_samples, max_samples) */
     while (insamples) {
         if (!pbuf) {
             AVRational samples_tb = { 1, link->sample_rate };
-            int perms = link->dstpad->min_perms | AV_PERM_WRITE;
-            pbuf = ff_get_audio_buffer(link, perms, link->partial_buf_size);
+            pbuf = ff_get_audio_buffer(link, link->partial_buf_size);
             if (!pbuf) {
                 av_log(link->dst, AV_LOG_WARNING,
                        "Samples dropped due to memory allocation failure.\n");
                 return 0;
             }
-            avfilter_copy_buffer_ref_props(pbuf, frame);
+            av_frame_copy_props(pbuf, frame);
             pbuf->pts = frame->pts +
                         av_rescale_q(inpos, samples_tb, link->time_base);
-            pbuf->audio->nb_samples = 0;
+            pbuf->nb_samples = 0;
         }
         nb_samples = FFMIN(insamples,
-                           link->partial_buf_size - pbuf->audio->nb_samples);
+                           link->partial_buf_size - pbuf->nb_samples);
         av_samples_copy(pbuf->extended_data, frame->extended_data,
-                        pbuf->audio->nb_samples, inpos,
+                        pbuf->nb_samples, inpos,
                         nb_samples, nb_channels, link->format);
         inpos     += nb_samples;
         insamples -= nb_samples;
-        pbuf->audio->nb_samples += nb_samples;
-        if (pbuf->audio->nb_samples >= link->min_samples) {
+        pbuf->nb_samples += nb_samples;
+        if (pbuf->nb_samples >= link->min_samples) {
             ret = ff_filter_frame_framed(link, pbuf);
             pbuf = NULL;
         }
     }
-    avfilter_unref_buffer(frame);
+    av_frame_free(&frame);
     link->partial_buf = pbuf;
     return ret;
 }
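
The framing path above rechunks incoming audio into blocks of at most link->partial_buf_size samples, carrying any remainder across calls in link->partial_buf and flushing a block downstream once it reaches link->min_samples. A minimal sketch of the same arithmetic in isolation (chunk, filled and min stand in for link->partial_buf_size, pbuf->nb_samples and link->min_samples; not part of the patch):

    int pos = 0, filled = 0;
    while (insamples) {
        int n = FFMIN(insamples, chunk - filled);
        /* copy n samples from input offset pos into the partial buffer at offset filled */
        pos       += n;
        insamples -= n;
        filled    += n;
        if (filled >= min) {
            /* emit the partial buffer downstream and start a fresh one */
            filled = 0;
        }
    }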
-int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
+int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

@@ -770,22 +742,22 @@ int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
     if (link->type == AVMEDIA_TYPE_VIDEO) {
         if (strcmp(link->dst->filter->name, "scale")) {
             av_assert1(frame->format == link->format);
-            av_assert1(frame->video->w == link->w);
-            av_assert1(frame->video->h == link->h);
+            av_assert1(frame->width  == link->w);
+            av_assert1(frame->height == link->h);
         }
     } else {
         av_assert1(frame->format == link->format);
-        av_assert1(frame->audio->channels == link->channels);
-        av_assert1(frame->audio->channel_layout == link->channel_layout);
-        av_assert1(frame->audio->sample_rate == link->sample_rate);
+        av_assert1(frame->channels == link->channels);
+        av_assert1(frame->channel_layout == link->channel_layout);
+        av_assert1(frame->sample_rate == link->sample_rate);
     }

     /* Go directly to actual filtering if possible */
     if (link->type == AVMEDIA_TYPE_AUDIO &&
         link->min_samples &&
         (link->partial_buf ||
-         frame->audio->nb_samples < link->min_samples ||
-         frame->audio->nb_samples > link->max_samples)) {
+         frame->nb_samples < link->min_samples ||
+         frame->nb_samples > link->max_samples)) {
         return ff_filter_frame_needs_framing(link, frame);
     } else {
         return ff_filter_frame_framed(link, frame);
@@ -37,6 +37,7 @@
 #include "libavutil/avutil.h"
 #include "libavutil/dict.h"
+#include "libavutil/frame.h"
 #include "libavutil/log.h"
 #include "libavutil/samplefmt.h"
 #include "libavutil/pixfmt.h"
@@ -69,6 +70,7 @@ typedef struct AVFilterLink    AVFilterLink;
 typedef struct AVFilterPad     AVFilterPad;
 typedef struct AVFilterFormats AVFilterFormats;

+#if FF_API_AVFILTERBUFFER
 /**
  * A reference-counted buffer data type used by the filter system. Filters
  * should not store pointers to this structure directly, but instead use the
@@ -200,6 +202,7 @@ typedef struct AVFilterBufferRef {
 /**
  * Copy properties of src to dst, without copying the actual data
  */
+attribute_deprecated
 void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src);

 /**
@@ -211,6 +214,7 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s
  * @return a new reference to the buffer with the same properties as the
  * old, excluding any permissions denied by pmask
  */
+attribute_deprecated
 AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);

 /**
@@ -222,6 +226,7 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
  * @note it is recommended to use avfilter_unref_bufferp() instead of this
  * function
  */
+attribute_deprecated
 void avfilter_unref_buffer(AVFilterBufferRef *ref);

 /**
@@ -231,11 +236,14 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref);
  *
  * @param ref pointer to the buffer reference
  */
+attribute_deprecated
 void avfilter_unref_bufferp(AVFilterBufferRef **ref);
+#endif

 /**
  * Get the number of channels of a buffer reference.
  */
+attribute_deprecated
 int avfilter_ref_get_channels(AVFilterBufferRef *ref);

 #if FF_API_AVFILTERPAD_PUBLIC
@@ -273,7 +281,7 @@ struct AVFilterPad {
      * link must have at least these permissions; this fact is checked by
      * asserts. It can be used to optimize buffer allocation.
      */
-    int min_perms;
+    attribute_deprecated int min_perms;

     /**
      * Input pads:
@@ -287,7 +295,7 @@ struct AVFilterPad {
      * Permissions which are automatically removed on outgoing buffers. It
      * can be used to optimize buffer allocation.
      */
-    int rej_perms;
+    attribute_deprecated int rej_perms;

     /**
      * @deprecated unused
@@ -300,7 +308,7 @@ struct AVFilterPad {
      *
      * Input video pads only.
      */
-    AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
+    AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);

     /**
      * Callback function to get an audio buffer. If NULL, the filter system will
@@ -308,8 +316,7 @@ struct AVFilterPad {
      *
      * Input audio pads only.
      */
-    AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
-                                           int nb_samples);
+    AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);

     /**
      * @deprecated unused
@@ -331,7 +338,7 @@ struct AVFilterPad {
      * must ensure that frame is properly unreferenced on error if it
      * hasn't been passed on to another filter.
      */
-    int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame);
+    int (*filter_frame)(AVFilterLink *link, AVFrame *frame);

     /**
      * Frame poll callback. This returns the number of immediately available
@@ -381,6 +388,8 @@ struct AVFilterPad {
      * input pads only.
      */
     int needs_fifo;
+
+    int needs_writable;
 };
 #endif
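
With the new callback signatures a pad is declared directly in terms of AVFrame. A minimal sketch of an input pad under the new API (filter and callback names are illustrative only):

    static int my_filter_frame(AVFilterLink *inlink, AVFrame *frame)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];

        /* ... modify frame in place ... */

        /* ff_filter_frame() takes ownership of frame; if we failed before
         * reaching this point we would have to av_frame_free(&frame)
         * ourselves, as the filter_frame contract above requires */
        return ff_filter_frame(outlink, frame);
    }

    static const AVFilterPad my_inputs[] = {
        {
            .name           = "default",
            .type           = AVMEDIA_TYPE_VIDEO,
            .filter_frame   = my_filter_frame,
            .needs_writable = 1, /* request a writable frame for in-place edits */
        },
        { NULL }
    };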
@@ -616,7 +625,7 @@ struct AVFilterLink {
     /**
      * Buffer partially filled with samples to achieve a fixed/minimum size.
      */
-    AVFilterBufferRef *partial_buf;
+    AVFrame *partial_buf;

     /**
      * Size of the partial buffer to allocate.
@@ -701,6 +710,7 @@ void avfilter_link_set_closed(AVFilterLink *link, int closed);
  */
 int avfilter_config_links(AVFilterContext *filter);

+#if FF_API_AVFILTERBUFFER
 /**
  * Create a buffer reference wrapped around an already allocated image
  * buffer.
@@ -712,6 +722,7 @@ int avfilter_config_links(AVFilterContext *filter);
  * @param h the height of the image specified by the data and linesize arrays
  * @param format the pixel format of the image specified by the data and linesize arrays
  */
+attribute_deprecated
 AVFilterBufferRef *
 avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
                                           int w, int h, enum AVPixelFormat format);
@@ -730,6 +741,7 @@ avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int lin
  * @param sample_fmt the format of each sample in the buffer to allocate
  * @param channel_layout the channel layout of the buffer
  */
+attribute_deprecated
 AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
                                                              int linesize,
                                                              int perms,
@@ -749,6 +761,7 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
  * @param channel_layout the channel layout of the buffer,
  *                       must be either 0 or consistent with channels
  */
+attribute_deprecated
 AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
                                                                       int linesize,
                                                                       int perms,
@@ -757,6 +770,7 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **
                                                                       int channels,
                                                                       uint64_t channel_layout);
+#endif

 #define AVFILTER_CMD_FLAG_ONE   1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically

@@ -845,6 +859,26 @@ void avfilter_free(AVFilterContext *filter);
 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
                            unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);

+#if FF_API_AVFILTERBUFFER
+/**
+ * Copy the frame properties of src to dst, without copying the actual
+ * image data.
+ *
+ * @return 0 on success, a negative number on error.
+ */
+attribute_deprecated
+int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
+
+/**
+ * Copy the frame properties and data pointers of src to dst, without copying
+ * the actual data.
+ *
+ * @return 0 on success, a negative number on error.
+ */
+attribute_deprecated
+int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
+#endif
+
 /**
  * @}
  */
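
During the transition both representations coexist under FF_API_AVFILTERBUFFER, and the two helpers above bridge them. A minimal sketch of mirroring an existing AVFilterBufferRef's properties and data pointers into an AVFrame (ref is assumed to be a valid, filled buffer reference; the payload stays owned by ref):

    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);
    /* copies pts, dimensions or audio parameters, and the data pointers,
     * but not the data itself */
    ret = avfilter_copy_buf_props(frame, ref);
    if (ret < 0)
        av_frame_free(&frame);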
@@ -92,84 +92,13 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
     return ret;
 }

-void ff_free_pool(AVFilterPool *pool)
-{
-    int i;
-
-    av_assert0(pool->refcount > 0);
-
-    for (i = 0; i < POOL_SIZE; i++) {
-        if (pool->pic[i]) {
-            AVFilterBufferRef *picref = pool->pic[i];
-            /* free buffer: picrefs stored in the pool are not
-             * supposed to contain a free callback */
-            av_assert0(!picref->buf->refcount);
-            av_freep(&picref->buf->data[0]);
-            av_freep(&picref->buf);
-
-            av_freep(&picref->audio);
-            av_assert0(!picref->video || !picref->video->qp_table);
-            av_freep(&picref->video);
-            av_freep(&pool->pic[i]);
-            pool->count--;
-        }
-    }
-    pool->draining = 1;
-
-    if (!--pool->refcount) {
-        av_assert0(!pool->count);
-        av_free(pool);
-    }
-}
-
-static void store_in_pool(AVFilterBufferRef *ref)
-{
-    int i;
-    AVFilterPool *pool= ref->buf->priv;
-
-    av_assert0(ref->buf->data[0]);
-    av_assert0(pool->refcount>0);
-
-    if (ref->video)
-        av_freep(&ref->video->qp_table);
-
-    if (pool->count == POOL_SIZE) {
-        AVFilterBufferRef *ref1 = pool->pic[0];
-        av_freep(&ref1->video);
-        av_freep(&ref1->audio);
-        av_freep(&ref1->buf->data[0]);
-        av_freep(&ref1->buf);
-        av_free(ref1);
-        memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1));
-        pool->count--;
-        pool->pic[POOL_SIZE-1] = NULL;
-    }
-
-    for (i = 0; i < POOL_SIZE; i++) {
-        if (!pool->pic[i]) {
-            pool->pic[i] = ref;
-            pool->count++;
-            break;
-        }
-    }
-    if (pool->draining) {
-        ff_free_pool(pool);
-    } else
-        --pool->refcount;
-}
-
 void avfilter_unref_buffer(AVFilterBufferRef *ref)
 {
     if (!ref)
         return;
     av_assert0(ref->buf->refcount > 0);
-    if (!(--ref->buf->refcount)) {
-        if (!ref->buf->free) {
-            store_in_pool(ref);
-            return;
-        }
+    if (!(--ref->buf->refcount))
         ref->buf->free(ref->buf);
-    }
     if (ref->extended_data != ref->data)
         av_freep(&ref->extended_data);
     if (ref->video)
@@ -186,6 +115,36 @@ void avfilter_unref_bufferp(AVFilterBufferRef **ref)
     *ref = NULL;
 }

+int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
+{
+    dst->pts    = src->pts;
+    dst->pos    = av_frame_get_pkt_pos(src);
+    dst->format = src->format;
+
+    av_dict_free(&dst->metadata);
+    av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);
+
+    switch (dst->type) {
+    case AVMEDIA_TYPE_VIDEO:
+        dst->video->w                   = src->width;
+        dst->video->h                   = src->height;
+        dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
+        dst->video->interlaced          = src->interlaced_frame;
+        dst->video->top_field_first     = src->top_field_first;
+        dst->video->key_frame           = src->key_frame;
+        dst->video->pict_type           = src->pict_type;
+        break;
+    case AVMEDIA_TYPE_AUDIO:
+        dst->audio->sample_rate    = src->sample_rate;
+        dst->audio->channel_layout = src->channel_layout;
+        break;
+    default:
+        return AVERROR(EINVAL);
+    }
+
+    return 0;
+}
+
 void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
 {
     // copy common properties
@@ -206,40 +165,3 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s
     av_dict_free(&dst->metadata);
     av_dict_copy(&dst->metadata, src->metadata, 0);
 }
-
-AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink,
-                                      AVFilterBufferRef *ref)
-{
-    AVFilterBufferRef *buf;
-    int channels;
-
-    switch (outlink->type) {
-
-    case AVMEDIA_TYPE_VIDEO:
-        buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
-                                  ref->video->w, ref->video->h);
-        if(!buf)
-            return NULL;
-        av_image_copy(buf->data, buf->linesize,
-                      (void*)ref->data, ref->linesize,
-                      ref->format, ref->video->w, ref->video->h);
-        break;
-
-    case AVMEDIA_TYPE_AUDIO:
-        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
-                                  ref->audio->nb_samples);
-        if(!buf)
-            return NULL;
-        channels = ref->audio->channels;
-        av_samples_copy(buf->extended_data, ref->buf->extended_data,
-                        0, 0, ref->audio->nb_samples,
-                        channels,
-                        ref->format);
-        break;
-
-    default:
-        return NULL;
-    }
-    avfilter_copy_buffer_ref_props(buf, ref);
-    return buf;
-}
@@ -23,7 +23,7 @@
 #define AVFILTER_BUFFERQUEUE_H

 /**
- * FFBufQueue: simple AVFilterBufferRef queue API
+ * FFBufQueue: simple AVFrame queue API
  *
  * Note: this API is not thread-safe. Concurrent access to the same queue
  * must be protected by a mutex or any synchronization mechanism.
@@ -47,7 +47,7 @@
  * Structure holding the queue
  */
 struct FFBufQueue {
-    AVFilterBufferRef *queue[FF_BUFQUEUE_SIZE];
+    AVFrame *queue[FF_BUFQUEUE_SIZE];
     unsigned short head;
     unsigned short available; /**< number of available buffers */
 };
@@ -69,11 +69,11 @@ static inline int ff_bufqueue_is_full(struct FFBufQueue *queue)
  * (and unrefed) with a warning before adding the new buffer.
  */
 static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue,
-                                   AVFilterBufferRef *buf)
+                                   AVFrame *buf)
 {
     if (ff_bufqueue_is_full(queue)) {
         av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n");
-        avfilter_unref_buffer(BUCKET(--queue->available));
+        av_frame_free(&BUCKET(--queue->available));
     }
     BUCKET(queue->available++) = buf;
 }
@@ -84,8 +84,8 @@ static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue,
  * Buffer with index 0 is the first buffer in the queue.
  * Return NULL if the queue has not enough buffers.
  */
-static inline AVFilterBufferRef *ff_bufqueue_peek(struct FFBufQueue *queue,
-                                                  unsigned index)
+static inline AVFrame *ff_bufqueue_peek(struct FFBufQueue *queue,
+                                        unsigned index)
 {
     return index < queue->available ? BUCKET(index) : NULL;
 }
@@ -95,9 +95,9 @@ static inline AVFilterBufferRef *ff_bufqueue_peek(struct FFBufQueue *queue,
  *
  * Do not use on an empty queue.
  */
-static inline AVFilterBufferRef *ff_bufqueue_get(struct FFBufQueue *queue)
+static inline AVFrame *ff_bufqueue_get(struct FFBufQueue *queue)
 {
-    AVFilterBufferRef *ret = queue->queue[queue->head];
+    AVFrame *ret = queue->queue[queue->head];
     av_assert0(queue->available);
     queue->available--;
     queue->queue[queue->head] = NULL;
@@ -110,8 +110,10 @@ static inline AVFilterBufferRef *ff_bufqueue_get(struct FFBufQueue *queue)
  */
 static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue)
 {
-    while (queue->available)
-        avfilter_unref_buffer(ff_bufqueue_get(queue));
+    while (queue->available) {
+        AVFrame *buf = ff_bufqueue_get(queue);
+        av_frame_free(&buf);
+    }
 }

 #undef BUCKET
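
Since the queue now stores AVFrame pointers, whatever is pushed is owned by the queue until popped. A minimal usage sketch inside a filter (queue would normally live in the filter's private context; ctx is only used as the log context):

    struct FFBufQueue queue = { 0 };

    /* in filter_frame(): stash the incoming frame, transferring ownership */
    ff_bufqueue_add(ctx, &queue, frame);

    /* later: pop the oldest frame, consume it, then free it */
    AVFrame *oldest = ff_bufqueue_get(&queue);
    /* ... use oldest ... */
    av_frame_free(&oldest);

    /* in uninit(): drop anything still queued */
    ff_bufqueue_discard_all(&queue);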
@@ -35,7 +35,7 @@
 #include "internal.h"

 typedef struct {
-    AVFilterBufferRef *cur_buf;  ///< last buffer delivered on the sink
+    AVFrame *cur_frame;          ///< last frame delivered on the sink
     AVAudioFifo *audio_fifo;     ///< FIFO for audio samples
     int64_t next_pts;            ///< interpolating audio pts
 } BufferSinkContext;
@@ -48,59 +48,71 @@ static av_cold void uninit(AVFilterContext *ctx)
         av_audio_fifo_free(sink->audio_fifo);
 }

-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     BufferSinkContext *s = link->dst->priv;

-//     av_assert0(!s->cur_buf);
-    s->cur_buf = buf;
+//     av_assert0(!s->cur_frame);
+    s->cur_frame = frame;

     return 0;
 }

<<<<<<< HEAD
int ff_buffersink_read_compat(AVFilterContext *ctx, AVFilterBufferRef **buf)
||||||| merged common ancestors
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
=======
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
>>>>>>> 7e350379f87e7f74420b4813170fe808e2313911
 {
     BufferSinkContext *s = ctx->priv;
     AVFilterLink *link = ctx->inputs[0];
     int ret;

-    if (!buf)
-        return ff_poll_frame(ctx->inputs[0]);
-
     if ((ret = ff_request_frame(link)) < 0)
         return ret;

-    if (!s->cur_buf)
+    if (!s->cur_frame)
         return AVERROR(EINVAL);

-    *buf       = s->cur_buf;
-    s->cur_buf = NULL;
+    av_frame_move_ref(frame, s->cur_frame);
+    av_frame_free(&s->cur_frame);

     return 0;
 }
-static int read_from_fifo(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
+static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
                           int nb_samples)
 {
     BufferSinkContext *s = ctx->priv;
     AVFilterLink   *link = ctx->inputs[0];
-    AVFilterBufferRef *buf;
+    AVFrame *tmp;

-    if (!(buf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples)))
+    if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
         return AVERROR(ENOMEM);
-    av_audio_fifo_read(s->audio_fifo, (void**)buf->extended_data, nb_samples);
+    av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);

-    buf->pts = s->next_pts;
+    tmp->pts = s->next_pts;
     s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
                                 link->time_base);

-    *pbuf = buf;
+    av_frame_move_ref(frame, tmp);
+    av_frame_free(&tmp);

     return 0;
 }

<<<<<<< HEAD
int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
                                      int nb_samples)
||||||| merged common ancestors
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
                               int nb_samples)
=======
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
>>>>>>> 7e350379f87e7f74420b4813170fe808e2313911
 {
     BufferSinkContext *s = ctx->priv;
     AVFilterLink   *link = ctx->inputs[0];
@@ -113,38 +125,107 @@ int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **
     }

     while (ret >= 0) {
-        AVFilterBufferRef *buf;
-
         if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
-            return read_from_fifo(ctx, pbuf, nb_samples);
+            return read_from_fifo(ctx, frame, nb_samples);

-        ret = av_buffersink_read(ctx, &buf);
+        ret = ff_request_frame(link);
         if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo))
-            return read_from_fifo(ctx, pbuf, av_audio_fifo_size(s->audio_fifo));
+            return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
         else if (ret < 0)
             return ret;

-        if (buf->pts != AV_NOPTS_VALUE) {
-            s->next_pts = buf->pts -
+        if (s->cur_frame->pts != AV_NOPTS_VALUE) {
+            s->next_pts = s->cur_frame->pts -
                           av_rescale_q(av_audio_fifo_size(s->audio_fifo),
                                        (AVRational){ 1, link->sample_rate },
                                        link->time_base);
         }

-        ret = av_audio_fifo_write(s->audio_fifo, (void**)buf->extended_data,
-                                  buf->audio->nb_samples);
-        avfilter_unref_buffer(buf);
+        ret = av_audio_fifo_write(s->audio_fifo, (void**)s->cur_frame->extended_data,
+                                  s->cur_frame->nb_samples);
+        av_frame_free(&s->cur_frame);
     }

     return ret;
 }
+#if FF_API_AVFILTERBUFFER
+static void compat_free_buffer(AVFilterBuffer *buf)
+{
+    AVFrame *frame = buf->priv;
+    av_frame_free(&frame);
+    av_free(buf);
+}
+
+static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples)
+{
+    AVFilterBufferRef *buf;
+    AVFrame *frame;
+    int ret;
+
+    if (!pbuf)
+        return ff_poll_frame(ctx->inputs[0]);
+
+    frame = av_frame_alloc();
+    if (!frame)
+        return AVERROR(ENOMEM);
+
+    if (!nb_samples)
+        ret = av_buffersink_get_frame(ctx, frame);
+    else
+        ret = av_buffersink_get_samples(ctx, frame, nb_samples);
+
+    if (ret < 0)
+        goto fail;
+
+    if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
+        buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
+                                                        AV_PERM_READ,
+                                                        frame->width, frame->height,
+                                                        frame->format);
+    } else {
+        buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
+                                                        frame->linesize[0], AV_PERM_READ,
+                                                        frame->nb_samples,
+                                                        frame->format,
+                                                        frame->channel_layout);
+    }
+    if (!buf) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    avfilter_copy_frame_props(buf, frame);
+
+    buf->buf->priv = frame;
+    buf->buf->free = compat_free_buffer;
+
+    *pbuf = buf;
+
+    return 0;
+fail:
+    av_frame_free(&frame);
+    return ret;
+}
+
+int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
+{
+    return compat_read(ctx, buf, 0);
+}
+
+int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
+                               int nb_samples)
+{
+    return compat_read(ctx, buf, nb_samples);
+}
+#endif
 static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
     {
         .name         = "default",
         .type         = AVMEDIA_TYPE_VIDEO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
         .needs_fifo   = 1
     },
     { NULL }
@@ -169,7 +250,6 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
         .needs_fifo   = 1
     },
     { NULL }
@@ -26,6 +26,7 @@
 #include "avfilter.h"

+#if FF_API_AVFILTERBUFFER
 /**
  * Struct to use for initializing a buffersink context.
  */
@@ -94,6 +95,8 @@ void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
 int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
                                  AVFilterBufferRef **bufref, int flags);

+/* TODO */
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);

 /**
  * Get the number of immediately available frames.
@@ -122,6 +125,7 @@ AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);
  * @return >= 0 in case of success, a negative AVERROR code in case of
  * failure.
  */
+attribute_deprecated
 int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);

 /**
@@ -140,8 +144,38 @@ int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
  * @warning do not mix this function with av_buffersink_read(). Use only one or
  * the other with a single sink, not both.
  */
+attribute_deprecated
 int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
                                int nb_samples);
+#endif
+
+/**
+ * Get a frame with filtered data from sink and put it in frame.
+ *
+ * @param ctx   pointer to a context of a buffersink or abuffersink AVFilter.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ *              The data must be freed using av_frame_unref() / av_frame_free()
+ *
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure.
+ */
+int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
+
+/**
+ * Same as av_buffersink_get_frame(), but with the ability to specify the number
+ * of samples read. This function is less efficient than
+ * av_buffersink_get_frame(), because it copies the data around.
+ *
+ * @param ctx   pointer to a context of the abuffersink AVFilter.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ *              The data must be freed using av_frame_unref() / av_frame_free()
+ *              frame will contain exactly nb_samples audio samples, except at
+ *              the end of stream, when it can contain less than nb_samples.
+ *
+ * @warning do not mix this function with av_buffersink_get_frame(). Use only one or
+ * the other with a single sink, not both.
+ */
+int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
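
A typical consumer loop under the new API allocates a single AVFrame and reuses it across calls, unreferencing it after each use; a minimal sketch (sink is assumed to be a configured buffersink/abuffersink context, error handling trimmed):

    AVFrame *frame = av_frame_alloc();
    int ret;

    while ((ret = av_buffersink_get_frame(sink, frame)) >= 0) {
        /* ... consume frame->data / frame->nb_samples ... */
        av_frame_unref(frame); /* release the data, keep the frame for reuse */
    }
    /* ret ends up as an AVERROR code (e.g. AVERROR_EOF) when no more output is available */
    av_frame_free(&frame);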
 /**
  * @}
  */

@@ -26,6 +26,7 @@
 #include "libavutil/channel_layout.h"
 #include "libavutil/common.h"
 #include "libavutil/fifo.h"
+#include "libavutil/frame.h"
 #include "libavutil/imgutils.h"
 #include "libavutil/opt.h"
 #include "libavutil/samplefmt.h"
@@ -74,99 +75,193 @@ typedef struct {
         return AVERROR(EINVAL);\
     }

-int av_buffersrc_add_frame(AVFilterContext *buffer_src,
-                           const AVFrame *frame, int flags)
+int av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
 {
-    AVFilterBufferRef *picref;
-    int ret;
+    return av_buffersrc_add_frame(ctx, frame);
+}
+
+int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
+{
+    AVFrame *copy;
+    int ret = 0;
+    int64_t layout = frame->channel_layout;

-    if (!frame) /* NULL for EOF */
-        return av_buffersrc_add_ref(buffer_src, NULL, flags);
+    if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) {
+        av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
+        return AVERROR(EINVAL);
+    }

-    picref = avfilter_get_buffer_ref_from_frame(buffer_src->outputs[0]->type,
-                                                frame, AV_PERM_WRITE);
-    if (!picref)
+    if (!(copy = av_frame_alloc()))
         return AVERROR(ENOMEM);
-    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
-    picref->buf->data[0] = NULL;
-    avfilter_unref_buffer(picref);
-    return ret;
-}
+    ret = av_frame_ref(copy, frame);
+    if (ret >= 0)
+        ret = av_buffersrc_add_frame(ctx, copy);

-int av_buffersrc_write_frame(AVFilterContext *buffer_filter, const AVFrame *frame)
-{
-    return av_buffersrc_add_frame(buffer_filter, frame, 0);
+    av_frame_free(&copy);
+    return ret;
 }

-int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
+int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
 {
-    BufferSourceContext *c = s->priv;
-    AVFilterBufferRef *to_free = NULL;
+    BufferSourceContext *s = ctx->priv;
+    AVFrame *copy;
     int ret;
+    int64_t layout;

-    if (!buf) {
-        c->eof = 1;
+    if (!frame) {
+        s->eof = 1;
         return 0;
-    } else if (c->eof)
+    } else if (s->eof)
         return AVERROR(EINVAL);

-    if (!av_fifo_space(c->fifo) &&
-        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
-                                         sizeof(buf))) < 0)
-        return ret;
+    switch (ctx->outputs[0]->type) {
+    case AVMEDIA_TYPE_VIDEO:
+        CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
+                                 frame->format);
+        break;
+    case AVMEDIA_TYPE_AUDIO:
+        CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
+                                 frame->format);

-    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
-        switch (s->outputs[0]->type) {
-        case AVMEDIA_TYPE_VIDEO:
-            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
-            break;
-        case AVMEDIA_TYPE_AUDIO:
-            if (!buf->audio->channel_layout)
-                buf->audio->channel_layout = c->channel_layout;
-            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
-                                     buf->format);
-            break;
-        default:
+        layout = frame->channel_layout;
+        if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) {
+            av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
             return AVERROR(EINVAL);
         }
+        break;
+    default:
+        return AVERROR(EINVAL);
     }

-    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
-        to_free = buf = ff_copy_buffer_ref(s->outputs[0], buf);
-    if(!buf)
-        return -1;
-
-    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
-        avfilter_unref_buffer(to_free);
+    if (!av_fifo_space(s->fifo) &&
+        (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
+                                         sizeof(copy))) < 0)
         return ret;
-    }
-    c->nb_failed_requests = 0;
-    if (c->warning_limit &&
-        av_fifo_size(c->fifo) / sizeof(buf) >= c->warning_limit) {
-        av_log(s, AV_LOG_WARNING,
-               "%d buffers queued in %s, something may be wrong.\n",
-               c->warning_limit,
-               (char *)av_x_if_null(s->name, s->filter->name));
-        c->warning_limit *= 10;
-    }
-
-    if ((flags & AV_BUFFERSRC_FLAG_PUSH))
-        if ((ret = s->output_pads[0].request_frame(s->outputs[0])) < 0)
-            return ret;
+    if (!(copy = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    av_frame_move_ref(copy, frame);
+
+    if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
+        av_frame_move_ref(frame, copy);
+        av_frame_free(&copy);
+        return ret;
+    }

     return 0;
 }
-#ifdef FF_API_BUFFERSRC_BUFFER
-int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
+#if FF_API_AVFILTERBUFFER
+static void compat_free_buffer(void *opaque, uint8_t *data)
 {
-    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
+    AVFilterBufferRef *buf = opaque;
+    avfilter_unref_buffer(buf);
 }
-#endif

-unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
+static void compat_unref_buffer(void *opaque, uint8_t *data)
 {
-    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
+    AVBufferRef *buf = opaque;
+    av_buffer_unref(&buf);
+}
+
+int av_buffersrc_add_ref(AVFilterContext *ctx, AVFilterBufferRef *buf,
+                         int flags)
+{
+    BufferSourceContext *s = ctx->priv;
+    AVFrame *frame = NULL;
+    AVBufferRef *dummy_buf = NULL;
+    int ret = 0, planes, i;
+
+    if (!buf) {
+        s->eof = 1;
+        return 0;
+    } else if (s->eof)
+        return AVERROR(EINVAL);
+
+    frame = av_frame_alloc();
+    if (!frame)
+        return AVERROR(ENOMEM);
+
+    dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf,
+                                 (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY);
+    if (!dummy_buf) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    if ((ret = avfilter_copy_buf_props(frame, buf)) < 0)
+        goto fail;
+
+#define WRAP_PLANE(ref_out, data, data_size)                            \
+do {                                                                    \
+    AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf);                  \
+    if (!dummy_ref) {                                                   \
+        ret = AVERROR(ENOMEM);                                          \
+        goto fail;                                                      \
+    }                                                                   \
+    ref_out = av_buffer_create(data, data_size, compat_unref_buffer,    \
+                               dummy_ref, (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); \
+    if (!ref_out) {                                                     \
+        av_frame_unref(frame);                                          \
+        ret = AVERROR(ENOMEM);                                          \
+        goto fail;                                                      \
+    }                                                                   \
+} while (0)
+
+    if (ctx->outputs[0]->type == AVMEDIA_TYPE_VIDEO) {
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
+
+        if (!desc) {
+            ret = AVERROR(EINVAL);
+            goto fail;
+        }
+        planes = (desc->flags & PIX_FMT_PLANAR) ? desc->nb_components : 1;
+
+        for (i = 0; i < planes; i++) {
+            int h_shift    = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
+            int plane_size = (frame->width >> h_shift) * frame->linesize[i];
+
+            WRAP_PLANE(frame->buf[i], frame->data[i], plane_size);
+        }
+    } else {
+        int planar   = av_sample_fmt_is_planar(frame->format);
+        int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
+
+        planes = planar ? channels : 1;
+
+        if (planes > FF_ARRAY_ELEMS(frame->buf)) {
+            frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
+            frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
+                                             frame->nb_extended_buf);
+            if (!frame->extended_buf) {
+                ret = AVERROR(ENOMEM);
+                goto fail;
+            }
+        }
+
+        for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++)
+            WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]);
+        for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++)
+            WRAP_PLANE(frame->extended_buf[i],
+                       frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)],
+                       frame->linesize[0]);
+    }
+
+    ret = av_buffersrc_add_frame_flags(ctx, frame, flags);
+
+fail:
+    av_buffer_unref(&dummy_buf);
+    av_frame_free(&frame);
+
+    return ret;
+}
+
+int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf)
+{
+    return av_buffersrc_add_ref(ctx, buf, 0);
 }
+#endif
 #define OFFSET(x) offsetof(BufferSourceContext, x)
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

@@ -186,7 +281,7 @@ AVFILTER_DEFINE_CLASS(buffer);
 static av_cold int init_video(AVFilterContext *ctx, const char *args)
 {
     BufferSourceContext *c = ctx->priv;
-    char pix_fmt_str[128], sws_param[256] = "", *colon, *equal;
+    char pix_fmt_str[128], *colon, *equal;
     int ret, n = 0;

     c->class = &buffer_class;
@@ -195,6 +290,7 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args)
         av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
         return AVERROR(EINVAL);
     }

     colon = strchr(args, ':');
     equal = strchr(args, '=');
     if (equal && (!colon || equal < colon)) {
@@ -203,28 +299,25 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args)
         if (ret < 0)
             goto fail;
     } else {
-        if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
+        if (!args ||
+            (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
                         &c->time_base.num, &c->time_base.den,
-                        &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) {
-            av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
-            ret = AVERROR(EINVAL);
-            goto fail;
+                        &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
+            av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args);
+            return AVERROR(EINVAL);
         }
-        av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs\n");
-
-        if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
-            goto fail;
-        c->sws_param = av_strdup(sws_param);
-        if (!c->sws_param) {
-            ret = AVERROR(ENOMEM);
-            goto fail;
+        if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == AV_PIX_FMT_NONE) {
+            char *tail;
+            c->pix_fmt = strtol(pix_fmt_str, &tail, 10);
+            if (*tail || c->pix_fmt < 0 || c->pix_fmt >= AV_PIX_FMT_NB) {
+                av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str);
+                return AVERROR(EINVAL);
+            }
         }
     }

-    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
-        ret = AVERROR(ENOMEM);
-        goto fail;
-    }
+    if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
+        return AVERROR(ENOMEM);

     av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
            c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
@@ -238,6 +331,11 @@ fail:
     return ret;
 }

+unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
+{
+    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
+}
+
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
 static const AVOption abuffer_options[] = {
     { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS },
@@ -298,7 +396,7 @@ static av_cold int init_audio(AVFilterContext *ctx, const char *args)
         goto fail;
     }

-    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
+    if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*)))) {
         ret = AVERROR(ENOMEM);
         goto fail;
     }
@@ -321,9 +419,9 @@ static av_cold void uninit(AVFilterContext *ctx)
 {
     BufferSourceContext *s = ctx->priv;
     while (s->fifo && av_fifo_size(s->fifo)) {
-        AVFilterBufferRef *buf;
-        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
-        avfilter_unref_buffer(buf);
+        AVFrame *frame;
+        av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
+        av_frame_free(&frame);
     }
     av_fifo_free(s->fifo);
     s->fifo = NULL;
@@ -387,7 +485,8 @@ static int config_props(AVFilterLink *link)
 static int request_frame(AVFilterLink *link)
 {
     BufferSourceContext *c = link->src->priv;
-    AVFilterBufferRef *buf;
+    AVFrame *frame;
+    int ret = 0;

     if (!av_fifo_size(c->fifo)) {
         if (c->eof)
@@ -395,9 +494,12 @@ static int request_frame(AVFilterLink *link)
         c->nb_failed_requests++;
         return AVERROR(EAGAIN);
     }
-    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);
+    av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);

-    return ff_filter_frame(link, buf);
+    /* CIG TODO do not ignore error */
+    ff_filter_frame(link, frame);
+
+    return ret;
 }

 static int poll_frame(AVFilterLink *link)
@@ -406,7 +508,7 @@ static int poll_frame(AVFilterLink *link)
     int size = av_fifo_size(c->fifo);
     if (!size && c->eof)
         return AVERROR_EOF;
-    return size/sizeof(AVFilterBufferRef*);
+    return size/sizeof(AVFrame*);
 }

 static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
static const AVFilterPad avfilter_vsrc_buffer_outputs[] = { | static const AVFilterPad avfilter_vsrc_buffer_outputs[] = { | ||||
@@ -68,14 +68,15 @@ int av_buffersrc_add_ref(AVFilterContext *buffer_src, | |||||
*/ | */ | ||||
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); | unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); | ||||
#ifdef FF_API_BUFFERSRC_BUFFER | |||||
#if FF_API_AVFILTERBUFFER | |||||
/** | /** | ||||
* Add a buffer to the filtergraph s. | * Add a buffer to the filtergraph s. | ||||
* | * | ||||
* @param buf buffer containing frame data to be passed down the filtergraph. | * @param buf buffer containing frame data to be passed down the filtergraph. | ||||
* This function will take ownership of buf, the user must not free it. | * This function will take ownership of buf, the user must not free it. | ||||
* A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. | * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. | ||||
* @deprecated Use av_buffersrc_add_ref(s, picref, AV_BUFFERSRC_FLAG_NO_COPY) instead. | |||||
* | |||||
* @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame() | |||||
*/ | */ | ||||
attribute_deprecated | attribute_deprecated | ||||
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); | int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); | ||||
@@ -85,11 +86,42 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); | |||||
* Add a frame to the buffer source. | * Add a frame to the buffer source. | ||||
* | * | ||||
* @param s an instance of the buffersrc filter. | * @param s an instance of the buffersrc filter. | ||||
* @param frame frame to be added. | |||||
* @param frame frame to be added. If the frame is reference counted, this | |||||
* function will make a new reference to it. Otherwise the frame data will be | |||||
* copied. | |||||
* | * | ||||
* @warning frame data will be memcpy()ed, which may be a big performance | |||||
* hit. Use av_buffersrc_buffer() to avoid copying the data. | |||||
* @return 0 on success, a negative AVERROR on error | |||||
*/ | */ | ||||
int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); | int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); | ||||
/** | |||||
* Add a frame to the buffer source. | |||||
* | |||||
* @param s an instance of the buffersrc filter. | |||||
* @param frame frame to be added. If the frame is reference counted, this | |||||
* function will take ownership of the reference(s) and reset the frame. | |||||
* Otherwise the frame data will be copied. If this function returns an error, | |||||
* the input frame is not touched. | |||||
* | |||||
* @return 0 on success, a negative AVERROR on error. | |||||
* | |||||
* @note the difference between this function and av_buffersrc_write_frame() is | |||||
* that av_buffersrc_write_frame() creates a new reference to the input frame, | |||||
* while this function takes ownership of the reference passed to it. | |||||
*/ | |||||
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); | |||||
/** | |||||
* Add frame data to buffer_src. XXX | |||||
* | |||||
* @param buffer_src pointer to a buffer source context | |||||
* @param frame a frame, or NULL to mark EOF | |||||
* @param flags a combination of AV_BUFFERSRC_FLAG_* | |||||
* @return >= 0 in case of success, a negative AVERROR code | |||||
* in case of failure | |||||
*/ | |||||
int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, | |||||
AVFrame *frame, int flags); | |||||
#endif /* AVFILTER_BUFFERSRC_H */ | #endif /* AVFILTER_BUFFERSRC_H */ |
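
The ownership split between the two entry points matters in practice: av_buffersrc_write_frame() leaves the caller's frame untouched, while av_buffersrc_add_frame() consumes it. A minimal sketch of feeding a source either way (src is assumed to be a configured buffer/abuffer filter context):

    /* keep using `frame` afterwards: the source takes its own reference */
    ret = av_buffersrc_write_frame(src, frame);

    /* or hand the frame over entirely: on success `frame` is reset */
    ret = av_buffersrc_add_frame(src, frame);

    /* signal EOF once no more input will arrive */
    ret = av_buffersrc_add_frame(src, NULL);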
@@ -97,7 +97,7 @@ typedef struct {
     struct rect text;           ///< rectangle for the LU legend on the left
     struct rect graph;          ///< rectangle for the main graph in the center
     struct rect gauge;          ///< rectangle for the gauge on the right
-    AVFilterBufferRef *outpicref; ///< output picture reference, updated regularly
+    AVFrame *outpicref;         ///< output picture reference, updated regularly
     int meter;                  ///< select a EBU mode between +9 and +18
     int scale_range;            ///< the range of LU values according to the meter
     int y_zero_lu;              ///< the y value (pixel position) for 0 LU

@@ -174,7 +174,7 @@ static const uint8_t font_colors[] = {
     0x00, 0x96, 0x96,
 };

-static void drawtext(AVFilterBufferRef *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...)
+static void drawtext(AVFrame *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...)
 {
     int i;
     char buf[128] = {0};

@@ -207,7 +207,7 @@ static void drawtext(AVFilterBufferRef *pic, int x, int y, int ftid, const uint8
     }
 }

-static void drawline(AVFilterBufferRef *pic, int x, int y, int len, int step)
+static void drawline(AVFrame *pic, int x, int y, int len, int step)
 {
     int i;
     uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3;

@@ -224,7 +224,7 @@ static int config_video_output(AVFilterLink *outlink)
     uint8_t *p;
     AVFilterContext *ctx = outlink->src;
     EBUR128Context *ebur128 = ctx->priv;
-    AVFilterBufferRef *outpicref;
+    AVFrame *outpicref;

     /* check if there is enough space to represent everything decently */
     if (ebur128->w < 640 || ebur128->h < 480) {

@@ -259,10 +259,9 @@ static int config_video_output(AVFilterLink *outlink)
     av_assert0(ebur128->graph.h == ebur128->gauge.h);

     /* prepare the initial picref buffer */
-    avfilter_unref_bufferp(&ebur128->outpicref);
+    av_frame_free(&ebur128->outpicref);
     ebur128->outpicref = outpicref =
-        ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2,
-                            outlink->w, outlink->h);
+        ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!outpicref)
         return AVERROR(ENOMEM);
     outlink->sample_aspect_ratio = (AVRational){1,1};

@@ -450,15 +449,15 @@ static int gate_update(struct integrator *integ, double power,
     return gate_hist_pos;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 {
     int i, ch, idx_insample;
     AVFilterContext *ctx = inlink->dst;
     EBUR128Context *ebur128 = ctx->priv;
     const int nb_channels = ebur128->nb_channels;
-    const int nb_samples  = insamples->audio->nb_samples;
+    const int nb_samples  = insamples->nb_samples;
     const double *samples = (double *)insamples->data[0];
-    AVFilterBufferRef *pic = ebur128->outpicref;
+    AVFrame *pic = ebur128->outpicref;

     for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) {
         const int bin_id_400 = ebur128->i400.cache_pos;

@@ -639,7 +638,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)

             /* set pts and push frame */
             pic->pts = pts;
-            ret = ff_filter_frame(outlink, avfilter_ref_buffer(pic, ~AV_PERM_WRITE));
+            ret = ff_filter_frame(outlink, av_frame_clone(pic));
             if (ret < 0)
                 return ret;
         }

@@ -738,7 +737,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     }
     for (i = 0; i < ctx->nb_outputs; i++)
         av_freep(&ctx->output_pads[i].name);
-    avfilter_unref_bufferp(&ebur128->outpicref);
+    av_frame_free(&ebur128->outpicref);
 }

 static const AVFilterPad ebur128_inputs[] = {
@@ -134,7 +134,7 @@ typedef struct {
     DSPContext c;               ///< context providing optimized SAD methods (scene detect only)
     double prev_mafd;           ///< previous MAFD (scene detect only)
 #endif
-    AVFilterBufferRef *prev_picref; ///< previous frame (scene detect only)
+    AVFrame *prev_picref;       ///< previous frame (scene detect only)
     double select;
 } SelectContext;

@@ -219,25 +219,25 @@ static int config_input(AVFilterLink *inlink)
 }

 #if CONFIG_AVCODEC
-static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
+static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
 {
     double ret = 0;
     SelectContext *select = ctx->priv;
-    AVFilterBufferRef *prev_picref = select->prev_picref;
+    AVFrame *prev_picref = select->prev_picref;

     if (prev_picref &&
-        picref->video->h == prev_picref->video->h &&
-        picref->video->w == prev_picref->video->w &&
-        picref->linesize[0] == prev_picref->linesize[0]) {
+        frame->height == prev_picref->height &&
+        frame->width  == prev_picref->width &&
+        frame->linesize[0] == prev_picref->linesize[0]) {
         int x, y, nb_sad = 0;
         int64_t sad = 0;
         double mafd, diff;
-        uint8_t *p1 = picref->data[0];
+        uint8_t *p1 = frame->data[0];
         uint8_t *p2 = prev_picref->data[0];
-        const int linesize = picref->linesize[0];
+        const int linesize = frame->linesize[0];

-        for (y = 0; y < picref->video->h - 8; y += 8) {
-            for (x = 0; x < picref->video->w*3 - 8; x += 8) {
+        for (y = 0; y < frame->height - 8; y += 8) {
+            for (x = 0; x < frame->width*3 - 8; x += 8) {
                 sad += select->c.sad[1](select, p1 + x, p2 + x,
                                         linesize, 8);
                 nb_sad += 8 * 8;

@@ -250,9 +250,9 @@ static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
         diff = fabs(mafd - select->prev_mafd);
         ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
         select->prev_mafd = mafd;
-        avfilter_unref_buffer(prev_picref);
+        av_frame_free(&prev_picref);
     }
-    select->prev_picref = avfilter_ref_buffer(picref, ~0);
+    select->prev_picref = av_frame_clone(frame);
     return ret;
 }
 #endif
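To make the scene-score arithmetic above concrete (numbers invented for illustration): the SAD callback sums absolute sample differences over 8x8 blocks, so mafd = sad / nb_sad is the mean absolute frame difference per byte, and the score is clip(min(mafd, |mafd - prev_mafd|) / 100, 0, 1). If two consecutive RGB24 frames differ by 40 per sample on average while the previous pair differed by 4, then mafd = 40, diff = 36, and the score is clip(36 / 100, 0, 1) = 0.36 — a likely cut. A slow pan with mafd near 4 on both pairs gives diff near 0 and a score near 0, which is why the min() of the two terms suppresses steady motion.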
@@ -260,38 +260,38 @@ static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
 #define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
 #define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))

-static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
+static int select_frame(AVFilterContext *ctx, AVFrame *frame)
 {
     SelectContext *select = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
     double res;

     if (isnan(select->var_values[VAR_START_PTS]))
-        select->var_values[VAR_START_PTS] = TS2D(ref->pts);
+        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
     if (isnan(select->var_values[VAR_START_T]))
-        select->var_values[VAR_START_T] = TS2D(ref->pts) * av_q2d(inlink->time_base);
+        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

-    select->var_values[VAR_PTS] = TS2D(ref->pts);
-    select->var_values[VAR_T  ] = TS2D(ref->pts) * av_q2d(inlink->time_base);
-    select->var_values[VAR_POS] = ref->pos == -1 ? NAN : ref->pos;
+    select->var_values[VAR_PTS] = TS2D(frame->pts);
+    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
+    select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);

     switch (inlink->type) {
     case AVMEDIA_TYPE_AUDIO:
-        select->var_values[VAR_SAMPLES_N] = ref->audio->nb_samples;
+        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
         break;

     case AVMEDIA_TYPE_VIDEO:
         select->var_values[VAR_INTERLACE_TYPE] =
-            !ref->video->interlaced      ? INTERLACE_TYPE_P :
-             ref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
-        select->var_values[VAR_PICT_TYPE] = ref->video->pict_type;
+            !frame->interlaced_frame ? INTERLACE_TYPE_P :
+             frame->top_field_first  ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
+        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
 #if CONFIG_AVCODEC
         if (select->do_scene_detect) {
             char buf[32];
-            select->var_values[VAR_SCENE] = get_scene_score(ctx, ref);
+            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
             // TODO: document metadata
             snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
-            av_dict_set(&ref->metadata, "lavfi.scene_score", buf, 0);
+            av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
         }
 #endif
         break;

@@ -299,11 +299,10 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
     res = av_expr_eval(select->expr, select->var_values, NULL);
     av_log(inlink->dst, AV_LOG_DEBUG,
-           "n:%f pts:%f t:%f pos:%f key:%d",
+           "n:%f pts:%f t:%f key:%d",
            select->var_values[VAR_N],
            select->var_values[VAR_PTS],
            select->var_values[VAR_T],
-           select->var_values[VAR_POS],
            (int)select->var_values[VAR_KEY]);

     switch (inlink->type) {

@@ -330,7 +329,7 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
         select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
         select->var_values[VAR_SELECTED_N] += 1.0;
         if (inlink->type == AVMEDIA_TYPE_AUDIO)
-            select->var_values[VAR_CONSUMED_SAMPLES_N] += ref->audio->nb_samples;
+            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
     }

     select->var_values[VAR_N] += 1.0;

@@ -340,7 +339,7 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
     return res;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     SelectContext *select = inlink->dst->priv;

@@ -348,7 +347,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
     if (select->select)
         return ff_filter_frame(inlink->dst->outputs[0], frame);

-    avfilter_unref_bufferp(&frame);
+    av_frame_free(&frame);
     return 0;
 }

@@ -378,7 +377,7 @@ static av_cold void uninit(AVFilterContext *ctx)

 #if CONFIG_AVCODEC
     if (select->do_scene_detect) {
-        avfilter_unref_bufferp(&select->prev_picref);
+        av_frame_free(&select->prev_picref);
         if (select->avctx) {
             avcodec_close(select->avctx);
             av_freep(&select->avctx);
@@ -448,7 +448,7 @@ static void av_cold uninit(AVFilterContext *ctx)
     av_freep(&sendcmd->intervals);
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
 {
     AVFilterContext *ctx = inlink->dst;
     SendCmdContext *sendcmd = ctx->priv;
@@ -138,7 +138,7 @@ static inline char *double2int64str(char *buf, double v)

 #define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     SetPTSContext *setpts = inlink->dst->priv;
     int64_t in_pts = frame->pts;

@@ -150,16 +150,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
     }
     setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
     setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
-    setpts->var_values[VAR_POS       ] = frame->pos == -1 ? NAN : frame->pos;
+    setpts->var_values[VAR_POS       ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
     setpts->var_values[VAR_RTCTIME   ] = av_gettime();

     switch (inlink->type) {
     case AVMEDIA_TYPE_VIDEO:
-        setpts->var_values[VAR_INTERLACED] = frame->video->interlaced;
+        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
         break;

     case AVMEDIA_TYPE_AUDIO:
-        setpts->var_values[VAR_NB_SAMPLES] = frame->audio->nb_samples;
+        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
         break;
     }

@@ -192,7 +192,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
     setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base);
     setpts->var_values[VAR_N] += 1.0;
     if (setpts->type == AVMEDIA_TYPE_AUDIO) {
-        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->audio->nb_samples;
+        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
     }

     return ff_filter_frame(inlink->dst->outputs[0], frame);
 }
@@ -103,7 +103,7 @@ static int config_output_props(AVFilterLink *outlink)
     return 0;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -35,7 +35,7 @@
 #include "video.h"

 typedef struct Buf {
-    AVFilterBufferRef *buf;
+    AVFrame *frame;
     struct Buf  *next;
 } Buf;

@@ -47,8 +47,8 @@ typedef struct {
      * When a specific number of output samples is requested, the partial
      * buffer is stored here
      */
-    AVFilterBufferRef *buf_out;
-    int allocated_samples;      ///< number of samples buf_out was allocated for
+    AVFrame *out;
+    int allocated_samples;      ///< number of samples out was allocated for
 } FifoContext;

 static av_cold int init(AVFilterContext *ctx, const char *args)

@@ -66,25 +66,25 @@ static av_cold void uninit(AVFilterContext *ctx)

     for (buf = fifo->root.next; buf; buf = tmp) {
         tmp = buf->next;
-        avfilter_unref_bufferp(&buf->buf);
+        av_frame_free(&buf->frame);
         av_free(buf);
     }

-    avfilter_unref_bufferp(&fifo->buf_out);
+    av_frame_free(&fifo->out);
 }

-static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
 {
     FifoContext *fifo = inlink->dst->priv;

     fifo->last->next = av_mallocz(sizeof(Buf));
     if (!fifo->last->next) {
-        avfilter_unref_buffer(buf);
+        av_frame_free(&frame);
         return AVERROR(ENOMEM);
     }

     fifo->last = fifo->last->next;
-    fifo->last->buf = buf;
+    fifo->last->frame = frame;

     return 0;
 }

@@ -101,7 +101,7 @@ static void queue_pop(FifoContext *s)
 /**
  * Move data pointers and pts offset samples forward.
  */
-static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf,
+static void buffer_offset(AVFilterLink *link, AVFrame *frame,
                           int offset)
 {
     int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

@@ -110,32 +110,32 @@ static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf,
     int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
     int i;

-    av_assert0(buf->audio->nb_samples > offset);
+    av_assert0(frame->nb_samples > offset);

     for (i = 0; i < planes; i++)
-        buf->extended_data[i] += block_align*offset;
-    if (buf->data != buf->extended_data)
-        memcpy(buf->data, buf->extended_data,
-               FFMIN(planes, FF_ARRAY_ELEMS(buf->data)) * sizeof(*buf->data));
-    buf->linesize[0] -= block_align*offset;
-    buf->audio->nb_samples -= offset;
+        frame->extended_data[i] += block_align * offset;
+    if (frame->data != frame->extended_data)
+        memcpy(frame->data, frame->extended_data,
+               FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
+    frame->linesize[0] -= block_align*offset;
+    frame->nb_samples -= offset;

-    if (buf->pts != AV_NOPTS_VALUE) {
-        buf->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
-                                 link->time_base);
+    if (frame->pts != AV_NOPTS_VALUE) {
+        frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
+                                   link->time_base);
     }
 }
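A quick worked example of the pts adjustment above (values invented for illustration): trimming offset = 1024 samples from a 44100 Hz link whose time_base is 1/44100 advances pts by av_rescale_q(1024, {1,44100}, {1,44100}) = 1024 ticks, exactly the duration of the dropped samples; with a 1/90000 time_base the same trim advances pts by 1024 * 90000 / 44100, which rounds to 2090 ticks.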
-static int calc_ptr_alignment(AVFilterBufferRef *buf)
+static int calc_ptr_alignment(AVFrame *frame)
 {
-    int planes = av_sample_fmt_is_planar(buf->format) ?
-                 av_get_channel_layout_nb_channels(buf->audio->channel_layout) : 1;
+    int planes = av_sample_fmt_is_planar(frame->format) ?
+                 av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
     int min_align = 128;
     int p;

     for (p = 0; p < planes; p++) {
         int cur_align = 128;
-        while ((intptr_t)buf->extended_data[p] % cur_align)
+        while ((intptr_t)frame->extended_data[p] % cur_align)
             cur_align >>= 1;
         if (cur_align < min_align)
             min_align = cur_align;

@@ -147,35 +147,34 @@ static int return_audio_frame(AVFilterContext *ctx)
 {
     AVFilterLink *link = ctx->outputs[0];
     FifoContext *s = ctx->priv;
-    AVFilterBufferRef *head = s->root.next->buf;
-    AVFilterBufferRef *buf_out;
+    AVFrame *head = s->root.next->frame;
+    AVFrame *out;
     int ret;

-    if (!s->buf_out &&
-        head->audio->nb_samples >= link->request_samples &&
+    if (!s->out &&
+        head->nb_samples >= link->request_samples &&
         calc_ptr_alignment(head) >= 32) {
-        if (head->audio->nb_samples == link->request_samples) {
-            buf_out = head;
+        if (head->nb_samples == link->request_samples) {
+            out = head;
             queue_pop(s);
         } else {
-            buf_out = avfilter_ref_buffer(head, AV_PERM_READ);
-            if (!buf_out)
+            out = av_frame_clone(head);
+            if (!out)
                 return AVERROR(ENOMEM);

-            buf_out->audio->nb_samples = link->request_samples;
+            out->nb_samples = link->request_samples;
             buffer_offset(link, head, link->request_samples);
         }
     } else {
         int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

-        if (!s->buf_out) {
-            s->buf_out = ff_get_audio_buffer(link, AV_PERM_WRITE,
-                                             link->request_samples);
-            if (!s->buf_out)
+        if (!s->out) {
+            s->out = ff_get_audio_buffer(link, link->request_samples);
+            if (!s->out)
                 return AVERROR(ENOMEM);

-            s->buf_out->audio->nb_samples = 0;
-            s->buf_out->pts               = head->pts;
+            s->out->nb_samples = 0;
+            s->out->pts        = head->pts;
             s->allocated_samples = link->request_samples;
         } else if (link->request_samples != s->allocated_samples) {
             av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "

@@ -183,41 +182,41 @@ static int return_audio_frame(AVFilterContext *ctx)
             return AVERROR(EINVAL);
         }

-        while (s->buf_out->audio->nb_samples < s->allocated_samples) {
-            int len = FFMIN(s->allocated_samples - s->buf_out->audio->nb_samples,
-                            head->audio->nb_samples);
+        while (s->out->nb_samples < s->allocated_samples) {
+            int len = FFMIN(s->allocated_samples - s->out->nb_samples,
+                            head->nb_samples);

-            av_samples_copy(s->buf_out->extended_data, head->extended_data,
-                            s->buf_out->audio->nb_samples, 0, len, nb_channels,
+            av_samples_copy(s->out->extended_data, head->extended_data,
+                            s->out->nb_samples, 0, len, nb_channels,
                             link->format);
-            s->buf_out->audio->nb_samples += len;
+            s->out->nb_samples += len;

-            if (len == head->audio->nb_samples) {
-                avfilter_unref_buffer(head);
+            if (len == head->nb_samples) {
+                av_frame_free(&head);
                 queue_pop(s);

                 if (!s->root.next &&
                     (ret = ff_request_frame(ctx->inputs[0])) < 0) {
                     if (ret == AVERROR_EOF) {
-                        av_samples_set_silence(s->buf_out->extended_data,
-                                               s->buf_out->audio->nb_samples,
+                        av_samples_set_silence(s->out->extended_data,
+                                               s->out->nb_samples,
                                                s->allocated_samples -
-                                               s->buf_out->audio->nb_samples,
+                                               s->out->nb_samples,
                                                nb_channels, link->format);
-                        s->buf_out->audio->nb_samples = s->allocated_samples;
+                        s->out->nb_samples = s->allocated_samples;
                         break;
                     }
                     return ret;
                 }
-                head = s->root.next->buf;
+                head = s->root.next->frame;
             } else {
                 buffer_offset(link, head, len);
             }
         }

-        buf_out = s->buf_out;
-        s->buf_out = NULL;
+        out = s->out;
+        s->out = NULL;
     }
-    return ff_filter_frame(link, buf_out);
+    return ff_filter_frame(link, out);
 }

 static int request_frame(AVFilterLink *outlink)

@@ -234,7 +233,7 @@ static int request_frame(AVFilterLink *outlink)
     if (outlink->request_samples) {
         return return_audio_frame(outlink->src);
     } else {
-        ret = ff_filter_frame(outlink, fifo->root.next->buf);
+        ret = ff_filter_frame(outlink, fifo->root.next->frame);
         queue_pop(fifo);
     }

@@ -247,7 +246,6 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = {
         .type             = AVMEDIA_TYPE_VIDEO,
         .get_video_buffer = ff_null_get_video_buffer,
         .filter_frame     = add_to_queue,
-        .min_perms        = AV_PERM_PRESERVE,
     },
     { NULL }
 };

@@ -280,7 +278,6 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = {
         .type             = AVMEDIA_TYPE_AUDIO,
         .get_audio_buffer = ff_null_get_audio_buffer,
         .filter_frame     = add_to_queue,
-        .min_perms        = AV_PERM_PRESERVE,
     },
     { NULL }
 };
@@ -67,33 +67,13 @@ struct AVFilterPad {
      */
     enum AVMediaType type;

-    /**
-     * Minimum required permissions on incoming buffers. Any buffer with
-     * insufficient permissions will be automatically copied by the filter
-     * system to a new buffer which provides the needed access permissions.
-     *
-     * Input pads only.
-     */
-    int min_perms;
-
-    /**
-     * Permissions which are not accepted on incoming buffers. Any buffer
-     * which has any of these permissions set will be automatically copied
-     * by the filter system to a new buffer which does not have those
-     * permissions. This can be used to easily disallow buffers with
-     * AV_PERM_REUSE.
-     *
-     * Input pads only.
-     */
-    int rej_perms;
-
     /**
      * Callback function to get a video buffer. If NULL, the filter system will
      * use ff_default_get_video_buffer().
      *
      * Input video pads only.
      */
-    AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
+    AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);

     /**
      * Callback function to get an audio buffer. If NULL, the filter system will

@@ -101,8 +81,7 @@ struct AVFilterPad {
      *
      * Input audio pads only.
      */
-    AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
-                                           int nb_samples);
+    AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);

     /**
      * Filtering callback. This is where a filter receives a frame with

@@ -114,7 +93,7 @@ struct AVFilterPad {
      * must ensure that samplesref is properly unreferenced on error if it
      * hasn't been passed on to another filter.
      */
-    int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame);
+    int (*filter_frame)(AVFilterLink *link, AVFrame *frame);

     /**
      * Frame poll callback. This returns the number of immediately available

@@ -234,8 +213,6 @@ int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx);

 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);

-void ff_free_pool(AVFilterPool *pool);
-
 void ff_command_queue_pop(AVFilterContext *filter);

 /* misc trace functions */

@@ -252,7 +229,7 @@ void ff_command_queue_pop(AVFilterContext *filter);

 char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms);

-void ff_tlog_ref(void *ctx, AVFilterBufferRef *ref, int end);
+void ff_tlog_ref(void *ctx, AVFrame *ref, int end);

 void ff_tlog_link(void *ctx, AVFilterLink *link, int end);

@@ -346,6 +323,6 @@ int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **
  * @return >= 0 on success, a negative AVERROR on error. The receiving filter
  * is responsible for unreferencing frame in case of error.
  */
-int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame);
+int ff_filter_frame(AVFilterLink *link, AVFrame *frame);

 #endif /* AVFILTER_INTERNAL_H */
@@ -31,6 +31,8 @@
 #include "audio.h"
 #include "internal.h"

+#include "libavutil/audio_fifo.h"
+
 AVBufferSinkParams *av_buffersink_params_alloc(void)
 {
     static const int pixel_fmts[] = { AV_PIX_FMT_NONE };

@@ -88,14 +90,14 @@ static av_cold void common_uninit(AVFilterContext *ctx)
     if (buf->fifo) {
         while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) {
             av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL);
-            avfilter_unref_buffer(picref);
+            av_frame_unref(picref);
         }
         av_fifo_free(buf->fifo);
         buf->fifo = NULL;
     }
 }

-static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref)
+static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref)
 {
     BufferSinkContext *buf = ctx->priv;

@@ -114,7 +116,7 @@ static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref)
     return 0;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
 {
     AVFilterContext *ctx = inlink->dst;
     BufferSinkContext *buf = inlink->dst->priv;

@@ -141,18 +143,12 @@ void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
     inlink->partial_buf_size = frame_size;
 }

-int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
-                                 AVFilterBufferRef **bufref, int flags)
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
 {
     BufferSinkContext *buf = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
     int ret;
-
-    *bufref = NULL;
-
-    av_assert0(    !strcmp(ctx->filter->name, "buffersink")
-                || !strcmp(ctx->filter->name, "abuffersink")
-                || !strcmp(ctx->filter->name, "ffbuffersink")
-                || !strcmp(ctx->filter->name, "ffabuffersink"));
+    AVFrame *cur_frame;

     /* no picref available, fetch it from the filterchain */
     if (!av_fifo_size(buf->fifo)) {

@@ -165,13 +161,114 @@ int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
     if (!av_fifo_size(buf->fifo))
         return AVERROR(EINVAL);

-    if (flags & AV_BUFFERSINK_FLAG_PEEK)
-        *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
+    if (flags & AV_BUFFERSINK_FLAG_PEEK) {
+        cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
+        av_frame_ref(frame, cur_frame); /* TODO check failure */
+    } else {
+        av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL);
+        av_frame_move_ref(frame, cur_frame);
+        av_frame_free(&cur_frame);
+    }
+
+    return 0;
+}
+
+int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
+{
+    return av_buffersink_get_frame_flags(ctx, frame, 0);
+}
+
+int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
+{
+    av_assert0(!"TODO");
+}
+
+#if FF_API_AVFILTERBUFFER
+static void compat_free_buffer(AVFilterBuffer *buf)
+{
+    AVFrame *frame = buf->priv;
+    av_frame_free(&frame);
+    av_free(buf);
+}
+
+static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags)
+{
+    AVFilterBufferRef *buf;
+    AVFrame *frame;
+    int ret;
+
+    if (!pbuf)
+        return ff_poll_frame(ctx->inputs[0]);
+
+    frame = av_frame_alloc();
+    if (!frame)
+        return AVERROR(ENOMEM);
+
+    if (!nb_samples)
+        ret = av_buffersink_get_frame_flags(ctx, frame, flags);
     else
-        av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);
+        ret = av_buffersink_get_samples(ctx, frame, nb_samples);
+
+    if (ret < 0)
+        goto fail;
+
+    if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
+        buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
+                                                        AV_PERM_READ,
+                                                        frame->width, frame->height,
+                                                        frame->format);
+    } else {
+        buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
+                                                        frame->linesize[0], AV_PERM_READ,
+                                                        frame->nb_samples,
+                                                        frame->format,
+                                                        frame->channel_layout);
+    }
+    if (!buf) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    avfilter_copy_frame_props(buf, frame);
+
+    buf->buf->priv = frame;
+    buf->buf->free = compat_free_buffer;
+
+    *pbuf = buf;

     return 0;
+fail:
+    av_frame_free(&frame);
+    return ret;
+}
+
+int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
+{
+    return compat_read(ctx, buf, 0, 0);
+}
+
+int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
+                               int nb_samples)
+{
+    return compat_read(ctx, buf, nb_samples, 0);
+}
+
+int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
+                                 AVFilterBufferRef **bufref, int flags)
+{
+    BufferSinkContext *buf = ctx->priv;
+    AVFilterLink *inlink = ctx->inputs[0];
+    int ret;
+
+    *bufref = NULL;
+
+    av_assert0(    !strcmp(ctx->filter->name, "buffersink")
+                || !strcmp(ctx->filter->name, "abuffersink")
+                || !strcmp(ctx->filter->name, "ffbuffersink")
+                || !strcmp(ctx->filter->name, "ffabuffersink"));
+
+    return compat_read(ctx, bufref, 0, flags);
 }
+#endif

 AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
 {

@@ -406,94 +503,3 @@ AVFilter avfilter_asink_abuffersink = {
     .inputs    = abuffersink_inputs,
     .outputs   = NULL,
 };
-
-/* Libav compatibility API */
-
-extern AVFilter avfilter_vsink_buffer;
-extern AVFilter avfilter_asink_abuffer;
-
-int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
-{
-    AVFilterBufferRef *tbuf;
-    int ret;
-
-    if (ctx->filter->          inputs[0].start_frame ==
-        avfilter_vsink_buffer. inputs[0].start_frame ||
-        ctx->filter->          inputs[0].filter_frame ==
-        avfilter_asink_abuffer.inputs[0].filter_frame)
-        return ff_buffersink_read_compat(ctx, buf);
-    av_assert0(ctx->filter->                inputs[0].end_frame ==
-               avfilter_vsink_ffbuffersink. inputs[0].end_frame ||
-               ctx->filter->                inputs[0].filter_frame ==
-               avfilter_asink_ffabuffersink.inputs[0].filter_frame);
-
-    ret = av_buffersink_get_buffer_ref(ctx, &tbuf,
-                                       buf ? 0 : AV_BUFFERSINK_FLAG_PEEK);
-    if (!buf)
-        return ret >= 0;
-    if (ret < 0)
-        return ret;
-    *buf = tbuf;
-    return 0;
-}
-
-int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
-                               int nb_samples)
-{
-    BufferSinkContext *sink = ctx->priv;
-    int ret = 0, have_samples = 0, need_samples;
-    AVFilterBufferRef *tbuf, *in_buf;
-    AVFilterLink *link = ctx->inputs[0];
-    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
-
-    if (ctx->filter->          inputs[0].filter_frame ==
-        avfilter_asink_abuffer.inputs[0].filter_frame)
-        return ff_buffersink_read_samples_compat(ctx, buf, nb_samples);
-    av_assert0(ctx->filter->                inputs[0].filter_frame ==
-               avfilter_asink_ffabuffersink.inputs[0].filter_frame);
-
-    tbuf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples);
-    if (!tbuf)
-        return AVERROR(ENOMEM);
-
-    while (have_samples < nb_samples) {
-        ret = av_buffersink_get_buffer_ref(ctx, &in_buf,
-                                           AV_BUFFERSINK_FLAG_PEEK);
-        if (ret < 0) {
-            if (ret == AVERROR_EOF && have_samples) {
-                nb_samples = have_samples;
-                ret = 0;
-            }
-            break;
-        }
-
-        need_samples = FFMIN(in_buf->audio->nb_samples,
-                             nb_samples - have_samples);
-        av_samples_copy(tbuf->extended_data, in_buf->extended_data,
-                        have_samples, 0, need_samples,
-                        nb_channels, in_buf->format);
-        have_samples += need_samples;
-        if (need_samples < in_buf->audio->nb_samples) {
-            in_buf->audio->nb_samples -= need_samples;
-            av_samples_copy(in_buf->extended_data, in_buf->extended_data,
-                            0, need_samples, in_buf->audio->nb_samples,
-                            nb_channels, in_buf->format);
-        } else {
-            av_buffersink_get_buffer_ref(ctx, &in_buf, 0);
-            avfilter_unref_buffer(in_buf);
-        }
-    }
-    tbuf->audio->nb_samples = have_samples;
-
-    if (ret < 0) {
-        av_assert0(!av_fifo_size(sink->fifo));
-        if (have_samples)
-            add_buffer_ref(ctx, tbuf);
-        else
-            avfilter_unref_buffer(tbuf);
-        return ret;
-    }
-
-    *buf = tbuf;
-    return 0;
-}
@@ -68,17 +68,17 @@ static void split_uninit(AVFilterContext *ctx)
         av_freep(&ctx->output_pads[i].name);
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     int i, ret = AVERROR_EOF;

     for (i = 0; i < ctx->nb_outputs; i++) {
-        AVFilterBufferRef *buf_out;
+        AVFrame *buf_out;

         if (ctx->outputs[i]->closed)
             continue;
-        buf_out = avfilter_ref_buffer(frame, ~AV_PERM_WRITE);
+        buf_out = av_frame_clone(frame);
         if (!buf_out) {
             ret = AVERROR(ENOMEM);
             break;

@@ -88,7 +88,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
         if (ret < 0)
             break;
     }
-    avfilter_unref_bufferp(&frame);
+    av_frame_free(&frame);
     return ret;
 }
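The av_frame_clone() calls that replace avfilter_ref_buffer() throughout this merge are shallow by design: the clone is a new AVFrame holding its own references to the same underlying data buffers, so the per-output copies in split cost O(1) instead of a pixel copy. A standalone sketch of the ownership rules (not taken from this diff):

AVFrame *a = av_frame_alloc();
/* ... a acquires data, e.g. from a decoder or ff_get_video_buffer() ... */

AVFrame *b = av_frame_clone(a);  /* new AVFrame, new refs to the same buffers */
av_frame_free(&a);               /* data stays alive: b still holds a reference */
/* ... read b->data[0] ... */
av_frame_free(&b);               /* last reference gone, buffers are released */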
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2008 Vitor Sessak
- * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
- * Copyright (c) 2011 Mina Nagy Zaki
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * memory buffer source filter
- */
-
-#include "avfilter.h"
-#include "internal.h"
-#include "audio.h"
-#include "avcodec.h"
-#include "buffersrc.h"
-#include "asrc_abuffer.h"
-#include "libavutil/avstring.h"
-#include "libavutil/channel_layout.h"
-#include "libavutil/fifo.h"
-#include "libavutil/imgutils.h"
-
-typedef struct {
-    AVFifoBuffer     *fifo;
-    AVRational        time_base;     ///< time_base to set in the output link
-    int               eof;
-    unsigned          nb_failed_requests;
-
-    /* Video only */
-    AVFilterContext  *scale;
-    int               h, w;
-    enum AVPixelFormat  pix_fmt;
-    AVRational        sample_aspect_ratio;
-    char              sws_param[256];
-
-    /* Audio only */
-    // Audio format of incoming buffers
-    int sample_rate;
-    unsigned int sample_format;
-    int64_t channel_layout;
-
-    // Normalization filters
-    AVFilterContext *aconvert;
-    AVFilterContext *aresample;
-} BufferSourceContext;
-
-static void buf_free(AVFilterBuffer *ptr)
-{
-    av_free(ptr);
-    return;
-}
-
-int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
-                                        AVFilterBufferRef *samplesref,
-                                        int av_unused flags)
-{
-    return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
-}
-
-int av_asrc_buffer_add_samples(AVFilterContext *ctx,
-                               uint8_t *data[8], int linesize[8],
-                               int nb_samples, int sample_rate,
-                               int sample_fmt, int64_t channel_layout, int planar,
-                               int64_t pts, int av_unused flags)
-{
-    AVFilterBufferRef *samplesref;
-
-    if (!channel_layout)
-        return AVERROR(EINVAL);
-    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
-                     data, linesize[0], AV_PERM_WRITE,
-                     nb_samples,
-                     sample_fmt, channel_layout);
-    if (!samplesref)
-        return AVERROR(ENOMEM);
-
-    samplesref->buf->free  = buf_free;
-    samplesref->pts = pts;
-    samplesref->audio->sample_rate = sample_rate;
-
-    AV_NOWARN_DEPRECATED(
-    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
-    )
-}
-
-int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
-                              uint8_t *buf, int buf_size, int sample_rate,
-                              int sample_fmt, int64_t channel_layout, int planar,
-                              int64_t pts, int av_unused flags)
-{
-    uint8_t *data[8] = {0};
-    int linesize[8];
-    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
-        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);
-
-    av_samples_fill_arrays(data, linesize,
-                           buf, nb_channels, nb_samples,
-                           sample_fmt, 16);
-
-    AV_NOWARN_DEPRECATED(
-    return av_asrc_buffer_add_samples(ctx,
-                                      data, linesize, nb_samples,
-                                      sample_rate,
-                                      sample_fmt, channel_layout, planar,
-                                      pts, flags);
-    )
-}
@@ -313,11 +313,6 @@ static av_cold int movie_common_init(AVFilterContext *ctx, const char *args, con
         }
     }

-    if (!(movie->frame = avcodec_alloc_frame()) ) {
-        av_log(log, AV_LOG_ERROR, "Failed to alloc frame\n");
-        return AVERROR(ENOMEM);
-    }
-
     av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
            movie->seek_point, movie->format_name, movie->file_name,
            movie->stream_index);

@@ -339,7 +334,7 @@ static av_cold void movie_uninit(AVFilterContext *ctx)
     av_freep(&movie->file_name);
     av_freep(&movie->st);
     av_freep(&movie->out_index);
-    avcodec_free_frame(&movie->frame);
+    av_frame_free(&movie->frame);
     if (movie->format_ctx)
         avformat_close_input(&movie->format_ctx);
 }

@@ -399,54 +394,34 @@ static int movie_config_output_props(AVFilterLink *outlink)
     return 0;
 }

-static AVFilterBufferRef *frame_to_buf(enum AVMediaType type, AVFrame *frame,
-                                       AVFilterLink *outlink)
-{
-    AVFilterBufferRef *buf, *copy;
-
-    buf = avfilter_get_buffer_ref_from_frame(type, frame,
-                                             AV_PERM_WRITE |
-                                             AV_PERM_PRESERVE |
-                                             AV_PERM_REUSE2);
-    if (!buf)
-        return NULL;
-    buf->pts = av_frame_get_best_effort_timestamp(frame);
-    copy = ff_copy_buffer_ref(outlink, buf);
-    if (!copy)
-        return NULL;
-    buf->buf->data[0] = NULL; /* it belongs to the frame */
-    avfilter_unref_buffer(buf);
-    return copy;
-}
-
-static char *describe_bufref_to_str(char *dst, size_t dst_size,
-                                    AVFilterBufferRef *buf,
+static char *describe_frame_to_str(char *dst, size_t dst_size,
+                                   AVFrame *frame,
                                    AVFilterLink *link)
 {
-    switch (buf->type) {
+    switch (frame->type) {
     case AVMEDIA_TYPE_VIDEO:
         snprintf(dst, dst_size,
-                 "video pts:%s time:%s pos:%"PRId64" size:%dx%d aspect:%d/%d",
-                 av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base),
-                 buf->pos, buf->video->w, buf->video->h,
-                 buf->video->sample_aspect_ratio.num,
-                 buf->video->sample_aspect_ratio.den);
+                 "video pts:%s time:%s size:%dx%d aspect:%d/%d",
+                 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
+                 frame->width, frame->height,
+                 frame->sample_aspect_ratio.num,
+                 frame->sample_aspect_ratio.den);
         break;
     case AVMEDIA_TYPE_AUDIO:
         snprintf(dst, dst_size,
-                 "audio pts:%s time:%s pos:%"PRId64" samples:%d",
-                 av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base),
-                 buf->pos, buf->audio->nb_samples);
+                 "audio pts:%s time:%s samples:%d",
+                 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
+                 frame->nb_samples);
         break;
     default:
-        snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(buf->type));
+        snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(frame->type));
         break;
     }
     return dst;
 }

-#define describe_bufref(buf, link) \
-    describe_bufref_to_str((char[1024]){0}, 1024, buf, link)
+#define describe_frameref(f, link) \
+    describe_frame_to_str((char[1024]){0}, 1024, f, link)

 static int rewind_file(AVFilterContext *ctx)
 {

@@ -489,7 +464,6 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
     MovieStream *st;
     int ret, got_frame = 0, pkt_out_id;
     AVFilterLink *outlink;
-    AVFilterBufferRef *buf;

     if (!pkt->size) {
         if (movie->eof) {

@@ -532,6 +506,10 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
     st = &movie->st[pkt_out_id];
     outlink = ctx->outputs[pkt_out_id];

+    movie->frame = av_frame_alloc();
+    if (!movie->frame)
+        return AVERROR(ENOMEM);
+
     switch (st->st->codec->codec_type) {
     case AVMEDIA_TYPE_VIDEO:
         ret = avcodec_decode_video2(st->st->codec, movie->frame, &got_frame, pkt);

@@ -545,6 +523,7 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
     }
     if (ret < 0) {
         av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret));
+        av_frame_free(&movie->frame);
         return 0;
     }
     if (!ret)

@@ -560,23 +539,16 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
     if (!got_frame) {
         if (!ret)
             st->done = 1;
+        av_frame_free(&movie->frame);
         return 0;
     }

-    buf = frame_to_buf(st->st->codec->codec_type, movie->frame, outlink);
-    if (!buf)
-        return AVERROR(ENOMEM);
     av_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name,
-            describe_bufref(buf, outlink));
-    switch (st->st->codec->codec_type) {
-    case AVMEDIA_TYPE_VIDEO:
-        if (!movie->frame->sample_aspect_ratio.num)
-            buf->video->sample_aspect_ratio = st->st->sample_aspect_ratio;
-        /* Fall through */
-    case AVMEDIA_TYPE_AUDIO:
-        ff_filter_frame(outlink, buf);
-        break;
-    }
+            describe_frameref(movie->frame, outlink));
+
+    movie->frame->pts = av_frame_get_best_effort_timestamp(movie->frame);
+    ff_filter_frame(outlink, movie->frame); // FIXME: raise error properly
+    movie->frame = NULL;

     return pkt_out_id == out_id;
 }
@@ -60,5 +60,8 @@
 #ifndef FF_API_BUFFERSRC_BUFFER
 #define FF_API_BUFFERSRC_BUFFER             (LIBAVFILTER_VERSION_MAJOR < 4)
 #endif
+#ifndef FF_API_AVFILTERBUFFER
+#define FF_API_AVFILTERBUFFER               (LIBAVFILTER_VERSION_MAJOR < 4)
+#endif

 #endif /* AVFILTER_VERSION_H */
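FF_API_AVFILTERBUFFER follows the usual FFmpeg deprecation pattern: the macro stays true while LIBAVFILTER_VERSION_MAJOR is below 4, and every AVFilterBufferRef-based entry point (such as the compat_read() wrappers above) is compiled only inside such a guard. A representative sketch of how a guarded prototype looks in a public header — whether av_buffersink_read() already carried attribute_deprecated at this point in history is not shown in this diff:

#if FF_API_AVFILTERBUFFER
attribute_deprecated
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
#endif

Once the major version is bumped, the guarded block compiles to nothing and the legacy symbols disappear without any further source edits.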
@@ -60,19 +60,18 @@ static int config_input(AVFilterLink *inlink)
     return 0;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur_buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *cur_buf)
 {
     AlphaExtractContext *extract = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
-    AVFilterBufferRef *out_buf =
-        ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+    AVFrame *out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     int ret;

     if (!out_buf) {
         ret = AVERROR(ENOMEM);
         goto end;
     }
-    avfilter_copy_buffer_ref_props(out_buf, cur_buf);
+    av_frame_copy_props(out_buf, cur_buf);

     if (extract->is_packed_rgb) {
         int x, y;

@@ -99,7 +98,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur_buf)
     ret = ff_filter_frame(outlink, out_buf);

 end:
-    avfilter_unref_buffer(cur_buf);
+    av_frame_unref(cur_buf);
     return ret;
 }

@@ -109,7 +108,6 @@ static const AVFilterPad alphaextract_inputs[] = {
         .type         = AVMEDIA_TYPE_VIDEO,
         .config_props = config_input,
         .filter_frame = filter_frame,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };
@@ -96,11 +96,11 @@ static int config_output(AVFilterLink *outlink)
 }

 static void draw_frame(AVFilterContext *ctx,
-                       AVFilterBufferRef *main_buf,
-                       AVFilterBufferRef *alpha_buf)
+                       AVFrame *main_buf,
+                       AVFrame *alpha_buf)
 {
     AlphaMergeContext *merge = ctx->priv;
-    int h = main_buf->video->h;
+    int h = main_buf->height;

     if (merge->is_packed_rgb) {
         int x, y;

@@ -108,7 +108,7 @@ static void draw_frame(AVFilterContext *ctx,
         for (y = 0; y < h; y++) {
             pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
             pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
-            for (x = 0; x < main_buf->video->w; x++) {
+            for (x = 0; x < main_buf->width; x++) {
                 *pout = *pin;
                 pin += 1;
                 pout += 4;

@@ -118,7 +118,7 @@ static void draw_frame(AVFilterContext *ctx,
         int y;
         const int main_linesize = main_buf->linesize[A];
         const int alpha_linesize = alpha_buf->linesize[Y];
-        for (y = 0; y < h && y < alpha_buf->video->h; y++) {
+        for (y = 0; y < h && y < alpha_buf->height; y++) {
             memcpy(main_buf->data[A] + y * main_linesize,
                    alpha_buf->data[Y] + y * alpha_linesize,
                    FFMIN(main_linesize, alpha_linesize));

@@ -126,7 +126,7 @@ static void draw_frame(AVFilterContext *ctx,
     }
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext *ctx = inlink->dst;
     AlphaMergeContext *merge = ctx->priv;

@@ -137,7 +137,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     ff_bufqueue_add(ctx, queue, buf);

     while (1) {
-        AVFilterBufferRef *main_buf, *alpha_buf;
+        AVFrame *main_buf, *alpha_buf;

         if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
             !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;

@@ -148,7 +148,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         merge->frame_requested = 0;
         draw_frame(ctx, main_buf, alpha_buf);
         ff_filter_frame(ctx->outputs[0], main_buf);
-        avfilter_unref_buffer(alpha_buf);
+        av_frame_free(&alpha_buf);
     }
     return 0;
 }
@@ -80,11 +80,11 @@ static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *c
     return 0;
 }
 
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     AspectContext *aspect = link->dst->priv;
 
-    frame->video->sample_aspect_ratio = aspect->ratio;
+    frame->sample_aspect_ratio = aspect->ratio;
     return ff_filter_frame(link->dst->outputs[0], frame);
 }
@@ -56,7 +56,7 @@ static int query_formats(AVFilterContext *ctx)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     BBoxContext *bbox = ctx->priv;
@@ -65,14 +65,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
     has_bbox =
         ff_calculate_bounding_box(&box,
-                                  picref->data[0], picref->linesize[0],
+                                  frame->data[0], frame->linesize[0],
                                   inlink->w, inlink->h, 16);
     w = box.x2 - box.x1 + 1;
     h = box.y2 - box.y1 + 1;
 
     av_log(ctx, AV_LOG_INFO,
            "n:%d pts:%s pts_time:%s", bbox->frame,
-           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base));
+           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
 
     if (has_bbox) {
         av_log(ctx, AV_LOG_INFO,
@@ -85,7 +85,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
     av_log(ctx, AV_LOG_INFO, "\n");
 
     bbox->frame++;
-    return ff_filter_frame(inlink->dst->outputs[0], picref);
+    return ff_filter_frame(inlink->dst->outputs[0], frame);
 }
 
 static const AVFilterPad bbox_inputs[] = {
@@ -146,7 +146,7 @@ static int request_frame(AVFilterLink *outlink)
     return ret;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
 {
     AVFilterContext *ctx = inlink->dst;
     BlackDetectContext *blackdetect = ctx->priv;
@@ -163,10 +163,10 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
     picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
 
     av_log(ctx, AV_LOG_DEBUG,
-           "frame:%u picture_black_ratio:%f pos:%"PRId64" pts:%s t:%s type:%c\n",
+           "frame:%u picture_black_ratio:%f pts:%s t:%s type:%c\n",
            blackdetect->frame_count, picture_black_ratio,
-           picref->pos, av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
-           av_get_picture_type_char(picref->video->pict_type));
+           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
+           av_get_picture_type_char(picref->pict_type));
 
     if (picture_black_ratio >= blackdetect->picture_black_ratio_th) {
         if (!blackdetect->black_started) {
@@ -81,7 +81,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     BlackFrameContext *blackframe = ctx->priv;
@@ -89,22 +89,22 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
     int pblack = 0;
     uint8_t *p = frame->data[0];
 
-    for (i = 0; i < frame->video->h; i++) {
+    for (i = 0; i < frame->height; i++) {
         for (x = 0; x < inlink->w; x++)
             blackframe->nblack += p[x] < blackframe->bthresh;
         p += frame->linesize[0];
     }
 
-    if (frame->video->key_frame)
+    if (frame->key_frame)
         blackframe->last_keyframe = blackframe->frame;
 
     pblack = blackframe->nblack * 100 / (inlink->w * inlink->h);
     if (pblack >= blackframe->bamount)
-        av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f "
+        av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
                "type:%c last_keyframe:%d\n",
-               blackframe->frame, pblack, frame->pos, frame->pts,
+               blackframe->frame, pblack, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
-               av_get_picture_type_char(frame->video->pict_type), blackframe->last_keyframe);
+               av_get_picture_type_char(frame->pict_type), blackframe->last_keyframe);
 
     blackframe->frame++;
     blackframe->nblack = 0;
@@ -368,9 +368,9 @@ static int request_frame(AVFilterLink *outlink)
 }
 
 static void blend_frame(AVFilterContext *ctx,
-                        AVFilterBufferRef *top_buf,
-                        AVFilterBufferRef *bottom_buf,
-                        AVFilterBufferRef *dst_buf)
+                        AVFrame *top_buf,
+                        AVFrame *bottom_buf,
+                        AVFrame *dst_buf)
 {
     BlendContext *b = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
@@ -380,8 +380,8 @@ static void blend_frame(AVFilterContext *ctx,
     for (plane = 0; dst_buf->data[plane]; plane++) {
         int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
         int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
-        int outw = dst_buf->video->w >> hsub;
-        int outh = dst_buf->video->h >> vsub;
+        int outw = dst_buf->width >> hsub;
+        int outh = dst_buf->height >> vsub;
         uint8_t *dst = dst_buf->data[plane];
         uint8_t *top = top_buf->data[plane];
         uint8_t *bottom = bottom_buf->data[plane];
@@ -390,15 +390,15 @@ static void blend_frame(AVFilterContext *ctx,
         param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base);
         param->values[VAR_W] = outw;
         param->values[VAR_H] = outh;
-        param->values[VAR_SW] = outw / dst_buf->video->w;
-        param->values[VAR_SH] = outh / dst_buf->video->h;
+        param->values[VAR_SW] = outw / dst_buf->width;
+        param->values[VAR_SH] = outh / dst_buf->height;
 
         param->blend(top, top_buf->linesize[plane],
                      bottom, bottom_buf->linesize[plane],
                      dst, dst_buf->linesize[plane], outw, outh, param);
     }
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -411,7 +411,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     ff_bufqueue_add(ctx, queue, buf);
 
     while (1) {
-        AVFilterBufferRef *top_buf, *bottom_buf, *out_buf;
+        AVFrame *top_buf, *bottom_buf, *out_buf;
 
         if (!ff_bufqueue_peek(&b->queue_top, TOP) ||
             !ff_bufqueue_peek(&b->queue_bottom, BOTTOM)) break;
@@ -419,18 +419,17 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         top_buf = ff_bufqueue_get(&b->queue_top);
         bottom_buf = ff_bufqueue_get(&b->queue_bottom);
 
-        out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
-                                      outlink->w, outlink->h);
+        out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
         if (!out_buf) {
             return AVERROR(ENOMEM);
         }
-        avfilter_copy_buffer_ref_props(out_buf, top_buf);
+        av_frame_copy_props(out_buf, top_buf);
 
         b->frame_requested = 0;
         blend_frame(ctx, top_buf, bottom_buf, out_buf);
         ret = ff_filter_frame(ctx->outputs[0], out_buf);
-        avfilter_unref_buffer(top_buf);
-        avfilter_unref_buffer(bottom_buf);
+        av_frame_free(&top_buf);
+        av_frame_free(&bottom_buf);
     }
     return ret;
 }
@@ -441,12 +440,10 @@ static const AVFilterPad blend_inputs[] = {
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_input_top,
         .filter_frame  = filter_frame,
-        .min_perms     = AV_PERM_READ | AV_PERM_PRESERVE,
     },{
         .name          = "bottom",
         .type          = AVMEDIA_TYPE_VIDEO,
         .filter_frame  = filter_frame,
-        .min_perms     = AV_PERM_READ | AV_PERM_PRESERVE,
     },
     { NULL }
 };
@@ -328,23 +328,23 @@ static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li
           h, radius, power, temp);
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx = inlink->dst;
     BoxBlurContext *boxblur = ctx->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
-    AVFilterBufferRef *out;
+    AVFrame *out;
     int plane;
-    int cw = inlink->w >> boxblur->hsub, ch = in->video->h >> boxblur->vsub;
+    int cw = inlink->w >> boxblur->hsub, ch = in->height >> boxblur->vsub;
     int w[4] = { inlink->w, cw, cw, inlink->w };
-    int h[4] = { in->video->h, ch, ch, in->video->h };
+    int h[4] = { in->height, ch, ch, in->height };
 
-    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
-        avfilter_unref_bufferp(&in);
+        av_frame_free(&in);
         return AVERROR(ENOMEM);
     }
-    avfilter_copy_buffer_ref_props(out, in);
+    av_frame_copy_props(out, in);
 
     for (plane = 0; in->data[plane] && plane < 4; plane++)
         hblur(out->data[plane], out->linesize[plane],
@@ -358,7 +358,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
               w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
               boxblur->temp);
 
-    avfilter_unref_bufferp(&in);
+    av_frame_free(&in);
 
     return ff_filter_frame(outlink, out);
 }
@@ -369,7 +369,6 @@ static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_input,
         .filter_frame  = filter_frame,
-        .min_perms     = AV_PERM_READ
     },
     { NULL }
 };
@@ -183,12 +183,12 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
 }
 
 static void process_frame_uyvy422(ColorMatrixContext *color,
-                                  AVFilterBufferRef *dst, AVFilterBufferRef *src)
+                                  AVFrame *dst, AVFrame *src)
 {
     const unsigned char *srcp = src->data[0];
     const int src_pitch = src->linesize[0];
-    const int height = src->video->h;
-    const int width = src->video->w*2;
+    const int height = src->height;
+    const int width = src->width*2;
     unsigned char *dstp = dst->data[0];
     const int dst_pitch = dst->linesize[0];
     const int c2 = color->yuv_convert[color->mode][0][1];
@@ -215,15 +215,15 @@ static void process_frame_uyvy422(ColorMatrixContext *color,
 }
 
 static void process_frame_yuv422p(ColorMatrixContext *color,
-                                  AVFilterBufferRef *dst, AVFilterBufferRef *src)
+                                  AVFrame *dst, AVFrame *src)
 {
     const unsigned char *srcpU = src->data[1];
     const unsigned char *srcpV = src->data[2];
     const unsigned char *srcpY = src->data[0];
     const int src_pitchY = src->linesize[0];
     const int src_pitchUV = src->linesize[1];
-    const int height = src->video->h;
-    const int width = src->video->w;
+    const int height = src->height;
+    const int width = src->width;
     unsigned char *dstpU = dst->data[1];
     unsigned char *dstpV = dst->data[2];
     unsigned char *dstpY = dst->data[0];
@@ -257,7 +257,7 @@ static void process_frame_yuv422p(ColorMatrixContext *color,
 }
 
 static void process_frame_yuv420p(ColorMatrixContext *color,
-                                  AVFilterBufferRef *dst, AVFilterBufferRef *src)
+                                  AVFrame *dst, AVFrame *src)
 {
     const unsigned char *srcpU = src->data[1];
     const unsigned char *srcpV = src->data[2];
@@ -265,8 +265,8 @@ static void process_frame_yuv420p(ColorMatrixContext *color,
     const unsigned char *srcpN = src->data[0] + src->linesize[0];
     const int src_pitchY = src->linesize[0];
     const int src_pitchUV = src->linesize[1];
-    const int height = src->video->h;
-    const int width = src->video->w;
+    const int height = src->height;
+    const int width = src->width;
     unsigned char *dstpU = dst->data[1];
     unsigned char *dstpV = dst->data[2];
     unsigned char *dstpY = dst->data[0];
@@ -332,19 +332,19 @@ static int query_formats(AVFilterContext *ctx)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *link, AVFrame *in)
 {
     AVFilterContext *ctx = link->dst;
     ColorMatrixContext *color = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
-    AVFilterBufferRef *out;
+    AVFrame *out;
 
-    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
-        avfilter_unref_bufferp(&in);
+        av_frame_free(&in);
         return AVERROR(ENOMEM);
     }
-    avfilter_copy_buffer_ref_props(out, in);
+    av_frame_copy_props(out, in);
 
     if (in->format == AV_PIX_FMT_YUV422P)
         process_frame_yuv422p(color, out, in);
@@ -353,7 +353,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
     else
         process_frame_uyvy422(color, out, in);
 
-    avfilter_unref_bufferp(&in);
+    av_frame_free(&in);
 
     return ff_filter_frame(outlink, out);
 }
@@ -362,7 +362,6 @@ static const AVFilterPad colormatrix_inputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_input,
-        .min_perms     = AV_PERM_READ,
         .filter_frame  = filter_frame,
     },
     { NULL }
@@ -21,17 +21,35 @@
  * copy video filter
  */
 
+#include "libavutil/imgutils.h"
 #include "libavutil/internal.h"
 #include "avfilter.h"
 #include "internal.h"
 #include "video.h"
 
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    AVFilterLink *outlink = inlink->dst->outputs[0];
+    AVFrame *out = ff_get_video_buffer(outlink, in->width, in->height);
+
+    if (!out) {
+        av_frame_free(&in);
+        return AVERROR(ENOMEM);
+    }
+
+    av_frame_copy_props(out, in);
+    av_image_copy(out->data, out->linesize, in->data, in->linesize,
+                  in->format, in->width, in->height);
+    av_frame_free(&in);
+    return ff_filter_frame(outlink, out);
+}
+
 static const AVFilterPad avfilter_vf_copy_inputs[] = {
     {
         .name             = "default",
         .type             = AVMEDIA_TYPE_VIDEO,
         .get_video_buffer = ff_null_get_video_buffer,
-        .rej_perms        = ~0
+        .filter_frame     = filter_frame,
     },
     { NULL }
 };
@@ -70,7 +70,6 @@ enum var_name {
     VAR_X,
     VAR_Y,
     VAR_N,
-    VAR_POS,
     VAR_T,
     VAR_VARS_NB
 };
@@ -198,7 +197,6 @@ static int config_input(AVFilterLink *link)
     crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = NAN;
     crop->var_values[VAR_N] = 0;
     crop->var_values[VAR_T] = NAN;
-    crop->var_values[VAR_POS] = NAN;
 
     av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
     crop->hsub = pix_desc->log2_chroma_w;
@@ -277,19 +275,18 @@ static int config_output(AVFilterLink *link)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     AVFilterContext *ctx = link->dst;
     CropContext *crop = ctx->priv;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
     int i;
 
-    frame->video->w = crop->w;
-    frame->video->h = crop->h;
+    frame->width = crop->w;
+    frame->height = crop->h;
 
     crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
         NAN : frame->pts * av_q2d(link->time_base);
-    crop->var_values[VAR_POS] = frame->pos == -1 ? NAN : frame->pos;
     crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
     crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL);
     crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
@@ -117,7 +117,7 @@ static int config_input(AVFilterLink *inlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     CropDetectContext *cd = ctx->priv;
@@ -128,36 +128,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
     if (++cd->frame_nb > 0) {
         // Reset the crop area every reset_count frames, if reset_count is > 0
         if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) {
-            cd->x1 = frame->video->w-1;
-            cd->y1 = frame->video->h-1;
+            cd->x1 = frame->width - 1;
+            cd->y1 = frame->height - 1;
             cd->x2 = 0;
             cd->y2 = 0;
             cd->frame_nb = 1;
         }
 
         for (y = 0; y < cd->y1; y++) {
-            if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) {
+            if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
                 cd->y1 = y;
                 break;
             }
         }
 
-        for (y = frame->video->h-1; y > cd->y2; y--) {
-            if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) {
+        for (y = frame->height - 1; y > cd->y2; y--) {
+            if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
                 cd->y2 = y;
                 break;
             }
         }
 
         for (y = 0; y < cd->x1; y++) {
-            if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) {
+            if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
                 cd->x1 = y;
                 break;
             }
         }
 
-        for (y = frame->video->w-1; y > cd->x2; y--) {
-            if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) {
+        for (y = frame->width - 1; y > cd->x2; y--) {
+            if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
                 cd->x2 = y;
                 break;
             }
@@ -187,8 +187,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
         y += (shrink_by/2 + 1) & ~1;
 
         av_log(ctx, AV_LOG_INFO,
-               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pos:%"PRId64" pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
-               cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pos, frame->pts,
+               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
+               cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
               w, h, x, y);
     }
@@ -47,7 +47,7 @@ typedef struct {
                              ///< if negative: number of sequential frames which were not dropped
 
     int hsub, vsub;          ///< chroma subsampling values
-    AVFilterBufferRef *ref;  ///< reference picture
+    AVFrame *ref;            ///< reference picture
     DSPContext dspctx;       ///< context providing optimized diff routines
     AVCodecContext *avctx;   ///< codec context required for the DSPContext
 } DecimateContext;
@@ -105,7 +105,7 @@ static int diff_planes(AVFilterContext *ctx,
  * different with respect to the reference frame ref.
  */
 static int decimate_frame(AVFilterContext *ctx,
-                          AVFilterBufferRef *cur, AVFilterBufferRef *ref)
+                          AVFrame *cur, AVFrame *ref)
 {
     DecimateContext *decimate = ctx->priv;
     int plane;
@@ -122,7 +122,7 @@ static int decimate_frame(AVFilterContext *ctx,
         int hsub = plane == 1 || plane == 2 ? decimate->hsub : 0;
         if (diff_planes(ctx,
                         cur->data[plane], ref->data[plane], ref->linesize[plane],
-                        ref->video->w>>hsub, ref->video->h>>vsub))
+                        ref->width>>hsub, ref->height>>vsub))
             return 0;
     }
@@ -155,7 +155,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
 static av_cold void uninit(AVFilterContext *ctx)
 {
     DecimateContext *decimate = ctx->priv;
-    avfilter_unref_bufferp(&decimate->ref);
+    av_frame_free(&decimate->ref);
     avcodec_close(decimate->avctx);
     av_opt_free(decimate);
     av_freep(&decimate->avctx);
@@ -189,7 +189,7 @@ static int config_input(AVFilterLink *inlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur)
+static int filter_frame(AVFilterLink *inlink, AVFrame *cur)
 {
     DecimateContext *decimate = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
@@ -198,11 +198,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur)
     if (decimate->ref && decimate_frame(inlink->dst, cur, decimate->ref)) {
         decimate->drop_count = FFMAX(1, decimate->drop_count+1);
     } else {
-        avfilter_unref_buffer(decimate->ref);
+        av_frame_free(&decimate->ref);
         decimate->ref = cur;
         decimate->drop_count = FFMIN(-1, decimate->drop_count-1);
 
-        if (ret = ff_filter_frame(outlink, avfilter_ref_buffer(cur, ~AV_PERM_WRITE)) < 0)
+        if (ret = ff_filter_frame(outlink, av_frame_clone(cur)) < 0)
             return ret;
     }
@@ -213,7 +213,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur)
            decimate->drop_count);
 
     if (decimate->drop_count > 0)
-        avfilter_unref_buffer(cur);
+        av_frame_free(&cur);
 
     return 0;
 }
@@ -238,7 +238,6 @@ static const AVFilterPad decimate_inputs[] = {
         .get_video_buffer = ff_null_get_video_buffer,
         .config_props     = config_input,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_READ | AV_PERM_PRESERVE,
     },
     { NULL }
 };
@@ -209,27 +209,28 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     DelogoContext *delogo = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
-    AVFilterBufferRef *out;
+    AVFrame *out;
     int hsub0 = desc->log2_chroma_w;
     int vsub0 = desc->log2_chroma_h;
     int direct = 0;
     int plane;
 
-    if (in->perms & AV_PERM_WRITE) {
+    if (av_frame_is_writable(in)) {
         direct = 1;
         out = in;
     } else {
-        out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
         if (!out) {
-            avfilter_unref_bufferp(&in);
+            av_frame_free(&in);
             return AVERROR(ENOMEM);
         }
-        avfilter_copy_buffer_ref_props(out, in);
+
+        av_frame_copy_props(out, in);
     }
 
     for (plane = 0; plane < 4 && in->data[plane]; plane++) {
@@ -246,7 +247,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
     }
 
     if (!direct)
-        avfilter_unref_bufferp(&in);
+        av_frame_free(&in);
 
     return ff_filter_frame(outlink, out);
 }
@@ -257,7 +258,6 @@ static const AVFilterPad avfilter_vf_delogo_inputs[] = {
         .type             = AVMEDIA_TYPE_VIDEO,
         .get_video_buffer = ff_null_get_video_buffer,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_WRITE | AV_PERM_READ,
     },
     { NULL }
 };
@@ -88,7 +88,7 @@ typedef struct {
 
 typedef struct {
     const AVClass *class;
-    AVFilterBufferRef *ref;    ///< Previous frame
+    AVFrame *ref;              ///< Previous frame
     int rx;                    ///< Maximum horizontal shift
     int ry;                    ///< Maximum vertical shift
     int edge;                  ///< Edge fill method
@@ -434,7 +434,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 {
     DeshakeContext *deshake = ctx->priv;
 
-    avfilter_unref_buffer(deshake->ref);
+    av_frame_free(&deshake->ref);
     if (deshake->fp)
         fclose(deshake->fp);
     if (deshake->avctx)
@@ -443,22 +443,22 @@ static av_cold void uninit(AVFilterContext *ctx)
     av_opt_free(deshake);
 }
 
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *link, AVFrame *in)
 {
     DeshakeContext *deshake = link->dst->priv;
     AVFilterLink *outlink = link->dst->outputs[0];
-    AVFilterBufferRef *out;
+    AVFrame *out;
     Transform t = {{0},0}, orig = {{0},0};
     float matrix[9];
     float alpha = 2.0 / deshake->refcount;
     char tmp[256];
 
-    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
-        avfilter_unref_bufferp(&in);
+        av_frame_free(&in);
         return AVERROR(ENOMEM);
     }
-    avfilter_copy_buffer_ref_props(out, in);
+    av_frame_copy_props(out, in);
 
     if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
         // Find the most likely global motion for the current frame
@@ -545,7 +545,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
     avfilter_transform(in->data[2], out->data[2], in->linesize[2], out->linesize[2], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge);
 
     // Cleanup the old reference frame
-    avfilter_unref_buffer(deshake->ref);
+    av_frame_free(&deshake->ref);
 
     // Store the current frame as the reference frame for calculating the
     // motion of the next frame
@@ -130,13 +130,13 @@ static int config_input(AVFilterLink *inlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     DrawBoxContext *drawbox = inlink->dst->priv;
     int plane, x, y, xb = drawbox->x, yb = drawbox->y;
     unsigned char *row[4];
 
-    for (y = FFMAX(yb, 0); y < frame->video->h && y < (yb + drawbox->h); y++) {
+    for (y = FFMAX(yb, 0); y < frame->height && y < (yb + drawbox->h); y++) {
         row[0] = frame->data[0] + y * frame->linesize[0];
 
         for (plane = 1; plane < 3; plane++)
@@ -144,12 +144,12 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
                 frame->linesize[plane] * (y >> drawbox->vsub);
 
         if (drawbox->invert_color) {
-            for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->video->w; x++)
+            for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->width; x++)
                 if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) ||
                     (x - xb < drawbox->thickness-1) || (xb + drawbox->w - x < drawbox->thickness))
                     row[0][x] = 0xff - row[0][x];
         } else {
-            for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->video->w; x++) {
+            for (x = FFMAX(xb, 0); x < xb + drawbox->w && x < frame->width; x++) {
                 double alpha = (double)drawbox->yuv_color[A] / 255;
 
                 if ((y - yb < drawbox->thickness-1) || (yb + drawbox->h - y < drawbox->thickness) ||
@@ -172,7 +172,7 @@ static const AVFilterPad avfilter_vf_drawbox_inputs[] = {
         .config_props     = config_input,
         .get_video_buffer = ff_null_get_video_buffer,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_WRITE | AV_PERM_READ,
+        .needs_writable   = 1,
     },
     { NULL }
 };
@@ -784,7 +784,7 @@ static int expand_text(AVFilterContext *ctx)
     return 0;
 }
 
-static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
+static int draw_glyphs(DrawTextContext *dtext, AVFrame *frame,
                        int width, int height, const uint8_t rgbcolor[4], FFDrawColor *color, int x, int y)
 {
     char *text = dtext->expanded_text.str;
@@ -812,7 +812,7 @@ static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
         y1 = dtext->positions[i].y+dtext->y+y;
 
         ff_blend_mask(&dtext->dc, color,
-                      picref->data, picref->linesize, width, height,
+                      frame->data, frame->linesize, width, height,
                       glyph->bitmap.buffer, glyph->bitmap.pitch,
                       glyph->bitmap.width, glyph->bitmap.rows,
                       glyph->bitmap.pixel_mode == FT_PIXEL_MODE_MONO ? 0 : 3,
@@ -822,7 +822,7 @@ static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
     return 0;
 }
 
-static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
+static int draw_text(AVFilterContext *ctx, AVFrame *frame,
                      int width, int height)
 {
     DrawTextContext *dtext = ctx->priv;
@@ -845,7 +845,7 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
     av_bprint_clear(bp);
 
     if(dtext->basetime != AV_NOPTS_VALUE)
-        now= picref->pts*av_q2d(ctx->inputs[0]->time_base) + dtext->basetime/1000000;
+        now= frame->pts*av_q2d(ctx->inputs[0]->time_base) + dtext->basetime/1000000;
 
     switch (dtext->exp_mode) {
     case EXP_NONE:
@@ -962,23 +962,23 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
     /* draw box */
     if (dtext->draw_box)
         ff_blend_rectangle(&dtext->dc, &dtext->boxcolor,
-                           picref->data, picref->linesize, width, height,
+                           frame->data, frame->linesize, width, height,
                            dtext->x, dtext->y, box_w, box_h);
 
     if (dtext->shadowx || dtext->shadowy) {
-        if ((ret = draw_glyphs(dtext, picref, width, height, dtext->shadowcolor.rgba,
+        if ((ret = draw_glyphs(dtext, frame, width, height, dtext->shadowcolor.rgba,
                                &dtext->shadowcolor, dtext->shadowx, dtext->shadowy)) < 0)
             return ret;
     }
 
-    if ((ret = draw_glyphs(dtext, picref, width, height, dtext->fontcolor.rgba,
+    if ((ret = draw_glyphs(dtext, frame, width, height, dtext->fontcolor.rgba,
                            &dtext->fontcolor, 0, 0)) < 0)
         return ret;
 
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -992,7 +992,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
     dtext->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
         NAN : frame->pts * av_q2d(inlink->time_base);
 
-    draw_text(ctx, frame, frame->video->w, frame->video->h);
+    draw_text(ctx, frame, frame->width, frame->height);
 
     av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n",
            (int)dtext->var_values[VAR_N], dtext->var_values[VAR_T],
@@ -1011,8 +1011,7 @@ static const AVFilterPad avfilter_vf_drawtext_inputs[] = {
         .get_video_buffer = ff_null_get_video_buffer,
         .filter_frame     = filter_frame,
         .config_props     = config_input,
-        .min_perms        = AV_PERM_WRITE |
-                            AV_PERM_READ,
+        .needs_writable   = 1,
     },
     { NULL }
 };
@@ -249,21 +249,21 @@ static void double_threshold(AVFilterContext *ctx, int w, int h,
     }
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx = inlink->dst;
     EdgeDetectContext *edgedetect = ctx->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
     uint8_t *tmpbuf = edgedetect->tmpbuf;
     uint16_t *gradients = edgedetect->gradients;
-    AVFilterBufferRef *out;
+    AVFrame *out;
 
-    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
-        avfilter_unref_bufferp(&in);
+        av_frame_free(&in);
         return AVERROR(ENOMEM);
     }
-    avfilter_copy_buffer_ref_props(out, in);
+    av_frame_copy_props(out, in);
 
     /* gaussian filter to reduce noise */
     gaussian_blur(ctx, inlink->w, inlink->h,
@@ -287,7 +287,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
                      out->data[0], out->linesize[0],
                      tmpbuf, inlink->w);
 
-    avfilter_unref_bufferp(&in);
+    av_frame_free(&in);
     return ff_filter_frame(outlink, out);
 }
@@ -305,7 +305,6 @@ static const AVFilterPad edgedetect_inputs[] = {
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_props,
         .filter_frame  = filter_frame,
-        .min_perms     = AV_PERM_READ,
     },
     { NULL }
 };
@@ -178,7 +178,7 @@ static void fade_plane(int y, int h, int w,
     }
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
+static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 {
     FadeContext *fade = inlink->dst->priv;
     uint8_t *p;
@@ -189,21 +189,21 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
         // alpha only
        plane = fade->is_packed_rgb ? 0 : A; // alpha is on plane 0 for packed formats
                                             // or plane 3 for planar formats
-        fade_plane(0, frame->video->h, inlink->w,
+        fade_plane(0, frame->height, inlink->w,
                   fade->factor, fade->black_level, fade->black_level_scaled,
                   fade->is_packed_rgb ? fade->rgba_map[A] : 0, // alpha offset for packed formats
                   fade->is_packed_rgb ? 4 : 1, // pixstep for 8 bit packed formats
                   1, frame->data[plane], frame->linesize[plane]);
     } else {
         /* luma or rgb plane */
-        fade_plane(0, frame->video->h, inlink->w,
+        fade_plane(0, frame->height, inlink->w,
                   fade->factor, fade->black_level, fade->black_level_scaled,
                   0, 1, // offset & pixstep for Y plane or RGB packed format
                   fade->bpp, frame->data[0], frame->linesize[0]);
 
         if (frame->data[1] && frame->data[2]) {
             /* chroma planes */
             for (plane = 1; plane < 3; plane++) {
-                for (i = 0; i < frame->video->h; i++) {
+                for (i = 0; i < frame->height; i++) {
                     p = frame->data[plane] + (i >> fade->vsub) * frame->linesize[plane];
                     for (j = 0; j < inlink->w >> fade->hsub; j++) {
                         /* 8421367 = ((128 << 1) + 1) << 15. It is an integer
@@ -234,7 +234,7 @@ static const AVFilterPad avfilter_vf_fade_inputs[] = {
         .config_props     = config_props,
         .get_video_buffer = ff_null_get_video_buffer,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_READ | AV_PERM_WRITE,
+        .needs_writable   = 1,
     },
     { NULL }
 };
@@ -82,14 +82,14 @@ static int config_props_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
 {
     FieldContext *field = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
     int i;
 
-    inpicref->video->h = outlink->h;
-    inpicref->video->interlaced = 0;
+    inpicref->height = outlink->h;
+    inpicref->interlaced_frame = 0;
 
     for (i = 0; i < field->nb_planes; i++) {
         if (field->type == FIELD_TYPE_BOTTOM)
return 0; | return 0; | ||||
} | } | ||||
static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h) | |||||
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
AVFilterLink *outlink = ctx->outputs[0]; | AVFilterLink *outlink = ctx->outputs[0]; | ||||
return ff_get_video_buffer(outlink, perms, w, h); | |||||
return ff_get_video_buffer(outlink, w, h); | |||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *frame) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
FieldOrderContext *s = ctx->priv; | FieldOrderContext *s = ctx->priv; | ||||
@@ -129,14 +129,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) | |||||
int h, plane, line_step, line_size, line; | int h, plane, line_step, line_size, line; | ||||
uint8_t *data; | uint8_t *data; | ||||
if (!frame->video->interlaced || | |||||
frame->video->top_field_first == s->dst_tff) | |||||
if (!frame->interlaced_frame || | |||||
frame->top_field_first == s->dst_tff) | |||||
return ff_filter_frame(outlink, frame); | return ff_filter_frame(outlink, frame); | ||||
av_dlog(ctx, | av_dlog(ctx, | ||||
"picture will move %s one line\n", | "picture will move %s one line\n", | ||||
s->dst_tff ? "up" : "down"); | s->dst_tff ? "up" : "down"); | ||||
h = frame->video->h; | |||||
h = frame->height; | |||||
for (plane = 0; plane < 4 && frame->data[plane]; plane++) { | for (plane = 0; plane < 4 && frame->data[plane]; plane++) { | ||||
line_step = frame->linesize[plane]; | line_step = frame->linesize[plane]; | ||||
line_size = s->line_size[plane]; | line_size = s->line_size[plane]; | ||||
@@ -148,7 +148,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) | |||||
* The new last line is created as a copy of the | * The new last line is created as a copy of the | ||||
* penultimate line from that field. */ | * penultimate line from that field. */ | ||||
for (line = 0; line < h; line++) { | for (line = 0; line < h; line++) { | ||||
if (1 + line < frame->video->h) { | |||||
if (1 + line < frame->height) { | |||||
memcpy(data, data + line_step, line_size); | memcpy(data, data + line_step, line_size); | ||||
} else { | } else { | ||||
memcpy(data, data - line_step - line_step, line_size); | memcpy(data, data - line_step - line_step, line_size); | ||||
@@ -172,7 +172,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame) | |||||
} | } | ||||
} | } | ||||
} | } | ||||
frame->video->top_field_first = s->dst_tff; | |||||
frame->top_field_first = s->dst_tff; | |||||
return ff_filter_frame(outlink, frame); | return ff_filter_frame(outlink, frame); | ||||
} | } | ||||
@@ -184,7 +184,7 @@ static const AVFilterPad avfilter_vf_fieldorder_inputs[] = { | |||||
.config_props = config_input, | .config_props = config_input, | ||||
.get_video_buffer = get_video_buffer, | .get_video_buffer = get_video_buffer, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.min_perms = AV_PERM_READ | AV_PERM_WRITE, | |||||
.needs_writable = 1, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -89,7 +89,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args) | |||||
} | } | ||||
av_opt_free(s); | av_opt_free(s); | ||||
if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFilterBufferRef*)))) | |||||
if (!(s->fifo = av_fifo_alloc(2*sizeof(AVFrame*)))) | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den); | av_log(ctx, AV_LOG_VERBOSE, "fps=%d/%d\n", s->framerate.num, s->framerate.den); | ||||
@@ -99,9 +99,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args) | |||||
static void flush_fifo(AVFifoBuffer *fifo) | static void flush_fifo(AVFifoBuffer *fifo) | ||||
{ | { | ||||
while (av_fifo_size(fifo)) { | while (av_fifo_size(fifo)) { | ||||
AVFilterBufferRef *tmp; | |||||
AVFrame *tmp; | |||||
av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL); | av_fifo_generic_read(fifo, &tmp, sizeof(tmp), NULL); | ||||
avfilter_unref_buffer(tmp); | |||||
av_frame_free(&tmp); | |||||
} | } | ||||
} | } | ||||
@@ -109,7 +109,7 @@ static av_cold void uninit(AVFilterContext *ctx) | |||||
{ | { | ||||
FPSContext *s = ctx->priv; | FPSContext *s = ctx->priv; | ||||
if (s->fifo) { | if (s->fifo) { | ||||
s->drop += av_fifo_size(s->fifo) / sizeof(AVFilterBufferRef*); | |||||
s->drop += av_fifo_size(s->fifo) / sizeof(AVFrame*); | |||||
flush_fifo(s->fifo); | flush_fifo(s->fifo); | ||||
av_fifo_free(s->fifo); | av_fifo_free(s->fifo); | ||||
} | } | ||||
@@ -145,7 +145,7 @@ static int request_frame(AVFilterLink *outlink) | |||||
if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) { | if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) { | ||||
int i; | int i; | ||||
for (i = 0; av_fifo_size(s->fifo); i++) { | for (i = 0; av_fifo_size(s->fifo); i++) { | ||||
AVFilterBufferRef *buf; | |||||
AVFrame *buf; | |||||
av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL); | av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL); | ||||
buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base, | buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base, | ||||
@@ -162,13 +162,13 @@ static int request_frame(AVFilterLink *outlink) | |||||
return ret; | return ret; | ||||
} | } | ||||
static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf) | |||||
static int write_to_fifo(AVFifoBuffer *fifo, AVFrame *buf) | |||||
{ | { | ||||
int ret; | int ret; | ||||
if (!av_fifo_space(fifo) && | if (!av_fifo_space(fifo) && | ||||
(ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo)))) { | (ret = av_fifo_realloc2(fifo, 2*av_fifo_size(fifo)))) { | ||||
avfilter_unref_bufferp(&buf); | |||||
av_frame_free(&buf); | |||||
return ret; | return ret; | ||||
} | } | ||||
@@ -176,7 +176,7 @@ static int write_to_fifo(AVFifoBuffer *fifo, AVFilterBufferRef *buf) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *buf) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
FPSContext *s = ctx->priv; | FPSContext *s = ctx->priv; | ||||
@@ -196,7 +196,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) | |||||
} else { | } else { | ||||
av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no " | av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no " | ||||
"timestamp.\n"); | "timestamp.\n"); | ||||
avfilter_unref_buffer(buf); | |||||
av_frame_free(&buf); | |||||
s->drop++; | s->drop++; | ||||
} | } | ||||
return 0; | return 0; | ||||
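Here and throughout, the two old unref flavors collapse into one call: avfilter_unref_buffer(buf) and avfilter_unref_bufferp(&buf) both become av_frame_free(&buf), which always takes an AVFrame** and resets it to NULL, so freeing twice through the same variable is harmless. For instance:

    #include <libavutil/frame.h>

    int main(void)
    {
        AVFrame *f = av_frame_alloc();
        av_frame_free(&f);  /* unreferences data, frees the struct, sets f = NULL */
        av_frame_free(&f);  /* safe no-op: f is already NULL */
        return 0;
    }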
@@ -213,8 +213,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) | |||||
if (delta < 1) { | if (delta < 1) { | ||||
/* drop the frame and everything buffered except the first */ | /* drop the frame and everything buffered except the first */ | ||||
AVFilterBufferRef *tmp; | |||||
int drop = av_fifo_size(s->fifo)/sizeof(AVFilterBufferRef*); | |||||
AVFrame *tmp; | |||||
int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*); | |||||
av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop); | av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop); | ||||
s->drop += drop; | s->drop += drop; | ||||
@@ -223,18 +223,18 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) | |||||
flush_fifo(s->fifo); | flush_fifo(s->fifo); | ||||
ret = write_to_fifo(s->fifo, tmp); | ret = write_to_fifo(s->fifo, tmp); | ||||
avfilter_unref_buffer(buf); | |||||
av_frame_free(&buf); | |||||
return ret; | return ret; | ||||
} | } | ||||
/* can output >= 1 frames */ | /* can output >= 1 frames */ | ||||
for (i = 0; i < delta; i++) { | for (i = 0; i < delta; i++) { | ||||
AVFilterBufferRef *buf_out; | |||||
AVFrame *buf_out; | |||||
av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); | av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL); | ||||
/* duplicate the frame if needed */ | /* duplicate the frame if needed */ | ||||
if (!av_fifo_size(s->fifo) && i < delta - 1) { | if (!av_fifo_size(s->fifo) && i < delta - 1) { | ||||
AVFilterBufferRef *dup = avfilter_ref_buffer(buf_out, ~0); | |||||
AVFrame *dup = av_frame_clone(buf_out); | |||||
av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n"); | av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n"); | ||||
if (dup) | if (dup) | ||||
@@ -243,8 +243,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) | |||||
ret = AVERROR(ENOMEM); | ret = AVERROR(ENOMEM); | ||||
if (ret < 0) { | if (ret < 0) { | ||||
avfilter_unref_bufferp(&buf_out); | |||||
avfilter_unref_bufferp(&buf); | |||||
av_frame_free(&buf_out); | |||||
av_frame_free(&buf); | |||||
return ret; | return ret; | ||||
} | } | ||||
@@ -255,7 +255,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf) | |||||
outlink->time_base) + s->frames_out; | outlink->time_base) + s->frames_out; | ||||
if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { | if ((ret = ff_filter_frame(outlink, buf_out)) < 0) { | ||||
avfilter_unref_bufferp(&buf); | |||||
av_frame_free(&buf); | |||||
return ret; | return ret; | ||||
} | } | ||||
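Frame duplication changes shape as well: avfilter_ref_buffer(buf, ~0) becomes av_frame_clone(), which allocates a new AVFrame whose buf[] entries reference the same underlying AVBufferRefs, so no pixel data is copied (a deep copy happens only when the source is not refcounted). The fps hunks above use exactly this to repeat the last buffered frame when the target rate calls for duplicates. A sketch:

    #include <libavutil/frame.h>

    /* Cheap duplicate: metadata is copied, pixel buffers are shared.
     * Returns NULL on allocation failure. */
    static AVFrame *dup_frame(const AVFrame *src)
    {
        return av_frame_clone(src);
    }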
@@ -66,7 +66,7 @@ static int config_output_props(AVFilterLink *outlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *ref) | |||||
{ | { | ||||
FrameStepContext *framestep = inlink->dst->priv; | FrameStepContext *framestep = inlink->dst->priv; | ||||
@@ -75,7 +75,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref) | |||||
return ff_filter_frame(inlink->dst->outputs[0], ref); | return ff_filter_frame(inlink->dst->outputs[0], ref); | ||||
} else { | } else { | ||||
framestep->frame_selected = 0; | framestep->frame_selected = 0; | ||||
avfilter_unref_buffer(ref); | |||||
av_frame_free(&ref); | |||||
return 0; | return 0; | ||||
} | } | ||||
} | } | ||||
@@ -379,24 +379,24 @@ static int query_formats(AVFilterContext *ctx) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
Frei0rContext *frei0r = inlink->dst->priv; | Frei0rContext *frei0r = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, in); | |||||
av_frame_copy_props(out, in); | |||||
frei0r->update(frei0r->instance, in->pts * av_q2d(inlink->time_base) * 1000, | frei0r->update(frei0r->instance, in->pts * av_q2d(inlink->time_base) * 1000, | ||||
(const uint32_t *)in->data[0], | (const uint32_t *)in->data[0], | ||||
(uint32_t *)out->data[0]); | (uint32_t *)out->data[0]); | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return ff_filter_frame(outlink, out); | return ff_filter_frame(outlink, out); | ||||
} | } | ||||
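The frei0r hunk shows the canonical non-in-place filter_frame() shape after the switch: request an output frame (the perms argument to ff_get_video_buffer() is gone), av_frame_copy_props() instead of avfilter_copy_buffer_ref_props(), process, free the input. Outside lavfi the allocation step can be approximated with public calls only — a hedged sketch, with av_frame_get_buffer() standing in for the internal ff_get_video_buffer():

    #include <libavutil/frame.h>

    /* Allocate a writable frame with the same geometry as 'in' and
     * carry over its metadata (pts, interlacing flags, ...). */
    static AVFrame *alloc_out_frame(const AVFrame *in)
    {
        AVFrame *out = av_frame_alloc();
        if (!out)
            return NULL;
        out->width  = in->width;
        out->height = in->height;
        out->format = in->format;
        if (av_frame_get_buffer(out, 32) < 0 ||
            av_frame_copy_props(out, in) < 0)
            av_frame_free(&out);    /* sets out to NULL on the error path */
        return out;
    }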
@@ -407,7 +407,6 @@ static const AVFilterPad avfilter_vf_frei0r_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.config_props = config_input_props, | .config_props = config_input_props, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.min_perms = AV_PERM_READ | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -487,19 +486,18 @@ static int source_config_props(AVFilterLink *outlink) | |||||
static int source_request_frame(AVFilterLink *outlink) | static int source_request_frame(AVFilterLink *outlink) | ||||
{ | { | ||||
Frei0rContext *frei0r = outlink->src->priv; | Frei0rContext *frei0r = outlink->src->priv; | ||||
AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
AVFrame *frame = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!picref) | |||||
if (!frame) | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
picref->video->sample_aspect_ratio = (AVRational) {1, 1}; | |||||
picref->pts = frei0r->pts++; | |||||
picref->pos = -1; | |||||
frame->sample_aspect_ratio = (AVRational) {1, 1}; | |||||
frame->pts = frei0r->pts++; | |||||
frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}), | |||||
NULL, (uint32_t *)picref->data[0]); | |||||
frei0r->update(frei0r->instance, av_rescale_q(frame->pts, frei0r->time_base, (AVRational){1,1000}), | |||||
NULL, (uint32_t *)frame->data[0]); | |||||
return ff_filter_frame(outlink, picref); | |||||
return ff_filter_frame(outlink, frame); | |||||
} | } | ||||
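The dropped `picref->pos = -1;` line (and histogram's `out->pos = in->pos;` further down) is no accident: AVFrame has no public pos field, so the demuxed byte offset now travels as packet metadata on the frame. Assuming the accessor of this libavutil generation:

    #include <libavutil/frame.h>

    /* Byte position of the packet the frame was decoded from,
     * or -1 if unknown -- the replacement for BufferRef.pos. */
    static int64_t frame_byte_pos(const AVFrame *frame)
    {
        return av_frame_get_pkt_pos(frame);
    }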
static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = { | static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = { | ||||
@@ -37,7 +37,7 @@ typedef struct { | |||||
AVExpr *e[4]; ///< expressions for each plane | AVExpr *e[4]; ///< expressions for each plane | ||||
char *expr_str[4]; ///< expression strings for each plane | char *expr_str[4]; ///< expression strings for each plane | ||||
int framenum; ///< frame counter | int framenum; ///< frame counter | ||||
AVFilterBufferRef *picref; ///< current input buffer | |||||
AVFrame *picref; ///< current input buffer | |||||
int hsub, vsub; ///< chroma subsampling | int hsub, vsub; ///< chroma subsampling | ||||
int planes; ///< number of planes | int planes; ///< number of planes | ||||
} GEQContext; | } GEQContext; | ||||
@@ -59,11 +59,11 @@ static inline double getpix(void *priv, double x, double y, int plane) | |||||
{ | { | ||||
int xi, yi; | int xi, yi; | ||||
GEQContext *geq = priv; | GEQContext *geq = priv; | ||||
AVFilterBufferRef *picref = geq->picref; | |||||
AVFrame *picref = geq->picref; | |||||
const uint8_t *src = picref->data[plane]; | const uint8_t *src = picref->data[plane]; | ||||
const int linesize = picref->linesize[plane]; | const int linesize = picref->linesize[plane]; | ||||
const int w = picref->video->w >> ((plane == 1 || plane == 2) ? geq->hsub : 0); | |||||
const int h = picref->video->h >> ((plane == 1 || plane == 2) ? geq->vsub : 0); | |||||
const int w = picref->width >> ((plane == 1 || plane == 2) ? geq->hsub : 0); | |||||
const int h = picref->height >> ((plane == 1 || plane == 2) ? geq->vsub : 0); | |||||
if (!src) | if (!src) | ||||
return 0; | return 0; | ||||
@@ -163,24 +163,24 @@ static int geq_config_props(AVFilterLink *inlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int geq_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
int plane; | int plane; | ||||
GEQContext *geq = inlink->dst->priv; | GEQContext *geq = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
double values[VAR_VARS_NB] = { | double values[VAR_VARS_NB] = { | ||||
[VAR_N] = geq->framenum++, | [VAR_N] = geq->framenum++, | ||||
[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base), | [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base), | ||||
}; | }; | ||||
geq->picref = in; | geq->picref = in; | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, in); | |||||
av_frame_copy_props(out, in); | |||||
for (plane = 0; plane < geq->planes && out->data[plane]; plane++) { | for (plane = 0; plane < geq->planes && out->data[plane]; plane++) { | ||||
int x, y; | int x, y; | ||||
@@ -204,7 +204,7 @@ static int geq_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
} | } | ||||
} | } | ||||
avfilter_unref_bufferp(&geq->picref); | |||||
av_frame_free(&geq->picref); | |||||
return ff_filter_frame(outlink, out); | return ff_filter_frame(outlink, out); | ||||
} | } | ||||
@@ -224,7 +224,6 @@ static const AVFilterPad geq_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.config_props = geq_config_props, | .config_props = geq_config_props, | ||||
.filter_frame = geq_filter_frame, | .filter_frame = geq_filter_frame, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -197,23 +197,23 @@ static int config_input(AVFilterLink *inlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
GradFunContext *gf = inlink->dst->priv; | GradFunContext *gf = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
int p, direct = 0; | int p, direct = 0; | ||||
if (in->perms & AV_PERM_WRITE) { | |||||
if (av_frame_is_writable(in)) { | |||||
direct = 1; | direct = 1; | ||||
out = in; | out = in; | ||||
} else { | } else { | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, in); | |||||
av_frame_copy_props(out, in); | |||||
} | } | ||||
for (p = 0; p < 4 && in->data[p]; p++) { | for (p = 0; p < 4 && in->data[p]; p++) { | ||||
@@ -233,7 +233,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
} | } | ||||
if (!direct) | if (!direct) | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return ff_filter_frame(outlink, out); | return ff_filter_frame(outlink, out); | ||||
} | } | ||||
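gradfun (and hqdn3d below) demonstrate the replacement for the permission bits: instead of testing `in->perms & AV_PERM_WRITE`, a filter asks av_frame_is_writable() — true only when this reference is the sole owner of the buffers — and otherwise allocates a separate output. Outside a filter graph, av_frame_make_writable() wraps the same copy-on-write dance in one call:

    #include <libavutil/frame.h>

    /* Ensure 'frame' may be modified in place: if its buffers are
     * shared with other references, the data is copied first. */
    static int prepare_inplace(AVFrame *frame)
    {
        if (av_frame_is_writable(frame))
            return 0;                         /* sole owner, write away */
        return av_frame_make_writable(frame); /* copies; <0 on error */
    }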
@@ -244,7 +244,6 @@ static const AVFilterPad avfilter_vf_gradfun_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.config_props = config_input, | .config_props = config_input, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -70,21 +70,21 @@ static int config_props(AVFilterLink *inlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
FlipContext *flip = ctx->priv; | FlipContext *flip = ctx->priv; | ||||
AVFilterLink *outlink = ctx->outputs[0]; | AVFilterLink *outlink = ctx->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
uint8_t *inrow, *outrow; | uint8_t *inrow, *outrow; | ||||
int i, j, plane, step, hsub, vsub; | int i, j, plane, step, hsub, vsub; | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, in); | |||||
av_frame_copy_props(out, in); | |||||
/* copy palette if required */ | /* copy palette if required */ | ||||
if (av_pix_fmt_desc_get(inlink->format)->flags & PIX_FMT_PAL) | if (av_pix_fmt_desc_get(inlink->format)->flags & PIX_FMT_PAL) | ||||
@@ -97,7 +97,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
outrow = out->data[plane]; | outrow = out->data[plane]; | ||||
inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step; | inrow = in ->data[plane] + ((inlink->w >> hsub) - 1) * step; | ||||
for (i = 0; i < in->video->h >> vsub; i++) { | |||||
for (i = 0; i < in->height >> vsub; i++) { | |||||
switch (step) { | switch (step) { | ||||
case 1: | case 1: | ||||
for (j = 0; j < (inlink->w >> hsub); j++) | for (j = 0; j < (inlink->w >> hsub); j++) | ||||
@@ -143,7 +143,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
} | } | ||||
} | } | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return ff_filter_frame(outlink, out); | return ff_filter_frame(outlink, out); | ||||
} | } | ||||
@@ -153,7 +153,6 @@ static const AVFilterPad avfilter_vf_hflip_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.config_props = config_props, | .config_props = config_props, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -142,7 +142,7 @@ static int config_input(AVFilterLink *inlink) | |||||
b = src[x + map[B]]; \ | b = src[x + map[B]]; \ | ||||
} while (0) | } while (0) | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
HisteqContext *histeq = ctx->priv; | HisteqContext *histeq = ctx->priv; | ||||
@@ -150,16 +150,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
int strength = histeq->strength * 1000; | int strength = histeq->strength * 1000; | ||||
int intensity = histeq->intensity * 1000; | int intensity = histeq->intensity * 1000; | ||||
int x, y, i, luthi, lutlo, lut, luma, oluma, m; | int x, y, i, luthi, lutlo, lut, luma, oluma, m; | ||||
AVFilterBufferRef *outpic; | |||||
AVFrame *outpic; | |||||
unsigned int r, g, b, jran; | unsigned int r, g, b, jran; | ||||
uint8_t *src, *dst; | uint8_t *src, *dst; | ||||
outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); | |||||
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!outpic) { | if (!outpic) { | ||||
avfilter_unref_bufferp(&inpic); | |||||
av_frame_free(&inpic); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(outpic, inpic); | |||||
av_frame_copy_props(outpic, inpic); | |||||
/* Seed random generator for antibanding. */ | /* Seed random generator for antibanding. */ | ||||
jran = LCG_SEED; | jran = LCG_SEED; | ||||
@@ -261,7 +261,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
av_dlog(ctx, "out[%d]: %u\n", x, histeq->out_histogram[x]); | av_dlog(ctx, "out[%d]: %u\n", x, histeq->out_histogram[x]); | ||||
#endif | #endif | ||||
avfilter_unref_bufferp(&inpic); | |||||
av_frame_free(&inpic); | |||||
return ff_filter_frame(outlink, outpic); | return ff_filter_frame(outlink, outpic); | ||||
} | } | ||||
@@ -271,7 +271,6 @@ static const AVFilterPad histeq_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.config_props = config_input, | .config_props = config_input, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -174,24 +174,23 @@ static int config_output(AVFilterLink *outlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
HistogramContext *h = inlink->dst->priv; | HistogramContext *h = inlink->dst->priv; | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
AVFilterLink *outlink = ctx->outputs[0]; | AVFilterLink *outlink = ctx->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
const uint8_t *src; | const uint8_t *src; | ||||
uint8_t *dst; | uint8_t *dst; | ||||
int i, j, k, l, ret; | int i, j, k, l, ret; | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
out->pts = in->pts; | out->pts = in->pts; | ||||
out->pos = in->pos; | |||||
for (k = 0; k < h->ncomp; k++) | for (k = 0; k < h->ncomp; k++) | ||||
for (i = 0; i < outlink->h; i++) | for (i = 0; i < outlink->h; i++) | ||||
@@ -202,9 +201,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
for (k = 0; k < h->ncomp; k++) { | for (k = 0; k < h->ncomp; k++) { | ||||
int start = k * (h->level_height + h->scale_height) * h->display_mode; | int start = k * (h->level_height + h->scale_height) * h->display_mode; | ||||
for (i = 0; i < in->video->h; i++) { | |||||
for (i = 0; i < in->height; i++) { | |||||
src = in->data[k] + i * in->linesize[k]; | src = in->data[k] + i * in->linesize[k]; | ||||
for (j = 0; j < in->video->w; j++) | |||||
for (j = 0; j < in->width; j++) | |||||
h->histogram[src[j]]++; | h->histogram[src[j]]++; | ||||
} | } | ||||
@@ -301,7 +300,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
} | } | ||||
ret = ff_filter_frame(outlink, out); | ret = ff_filter_frame(outlink, out); | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
if (ret < 0) | if (ret < 0) | ||||
return ret; | return ret; | ||||
return 0; | return 0; | ||||
@@ -320,7 +319,6 @@ static const AVFilterPad inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.config_props = config_input, | .config_props = config_input, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -304,37 +304,38 @@ static int config_input(AVFilterLink *inlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
HQDN3DContext *hqdn3d = inlink->dst->priv; | HQDN3DContext *hqdn3d = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
int direct = 0, c; | int direct = 0, c; | ||||
if (in->perms & AV_PERM_WRITE) { | |||||
if (av_frame_is_writable(in)) { | |||||
direct = 1; | direct = 1; | ||||
out = in; | out = in; | ||||
} else { | } else { | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, in); | |||||
av_frame_copy_props(out, in); | |||||
} | } | ||||
for (c = 0; c < 3; c++) { | for (c = 0; c < 3; c++) { | ||||
denoise(hqdn3d, in->data[c], out->data[c], | denoise(hqdn3d, in->data[c], out->data[c], | ||||
hqdn3d->line, &hqdn3d->frame_prev[c], | hqdn3d->line, &hqdn3d->frame_prev[c], | ||||
in->video->w >> (!!c * hqdn3d->hsub), | |||||
in->video->h >> (!!c * hqdn3d->vsub), | |||||
in->width >> (!!c * hqdn3d->hsub), | |||||
in->height >> (!!c * hqdn3d->vsub), | |||||
in->linesize[c], out->linesize[c], | in->linesize[c], out->linesize[c], | ||||
hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]); | hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]); | ||||
} | } | ||||
if (!direct) | if (!direct) | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return ff_filter_frame(outlink, out); | return ff_filter_frame(outlink, out); | ||||
} | } | ||||
@@ -276,18 +276,18 @@ static void process_chrominance(uint8_t *udst, uint8_t *vdst, const int dst_line | |||||
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) | #define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) | ||||
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb)) | #define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb)) | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) | |||||
{ | { | ||||
HueContext *hue = inlink->dst->priv; | HueContext *hue = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *outpic; | |||||
AVFrame *outpic; | |||||
outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!outpic) { | if (!outpic) { | ||||
avfilter_unref_bufferp(&inpic); | |||||
av_frame_free(&inpic); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(outpic, inpic); | |||||
av_frame_copy_props(outpic, inpic); | |||||
if (!hue->flat_syntax) { | if (!hue->flat_syntax) { | ||||
hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base); | hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base); | ||||
@@ -330,7 +330,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
inlink->w >> hue->hsub, inlink->h >> hue->vsub, | inlink->w >> hue->hsub, inlink->h >> hue->vsub, | ||||
hue->hue_cos, hue->hue_sin); | hue->hue_cos, hue->hue_sin); | ||||
avfilter_unref_bufferp(&inpic); | |||||
av_frame_free(&inpic); | |||||
return ff_filter_frame(outlink, outpic); | return ff_filter_frame(outlink, outpic); | ||||
} | } | ||||
@@ -349,7 +349,6 @@ static const AVFilterPad hue_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.config_props = config_props, | .config_props = config_props, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -47,9 +47,9 @@ typedef struct { | |||||
uint8_t history[HIST_SIZE]; | uint8_t history[HIST_SIZE]; | ||||
AVFilterBufferRef *cur; | |||||
AVFilterBufferRef *next; | |||||
AVFilterBufferRef *prev; | |||||
AVFrame *cur; | |||||
AVFrame *next; | |||||
AVFrame *prev; | |||||
int (*filter_line)(const uint8_t *prev, const uint8_t *cur, const uint8_t *next, int w); | int (*filter_line)(const uint8_t *prev, const uint8_t *cur, const uint8_t *next, int w); | ||||
const AVPixFmtDescriptor *csp; | const AVPixFmtDescriptor *csp; | ||||
@@ -113,8 +113,8 @@ static void filter(AVFilterContext *ctx) | |||||
int match = 0; | int match = 0; | ||||
for (i = 0; i < idet->csp->nb_components; i++) { | for (i = 0; i < idet->csp->nb_components; i++) { | ||||
int w = idet->cur->video->w; | |||||
int h = idet->cur->video->h; | |||||
int w = idet->cur->width; | |||||
int h = idet->cur->height; | |||||
int refs = idet->cur->linesize[i]; | int refs = idet->cur->linesize[i]; | ||||
if (i && i<3) { | if (i && i<3) { | ||||
@@ -165,13 +165,13 @@ static void filter(AVFilterContext *ctx) | |||||
} | } | ||||
if (idet->last_type == TFF){ | if (idet->last_type == TFF){ | ||||
idet->cur->video->top_field_first = 1; | |||||
idet->cur->video->interlaced = 1; | |||||
idet->cur->top_field_first = 1; | |||||
idet->cur->interlaced_frame = 1; | |||||
}else if(idet->last_type == BFF){ | }else if(idet->last_type == BFF){ | ||||
idet->cur->video->top_field_first = 0; | |||||
idet->cur->video->interlaced = 1; | |||||
idet->cur->top_field_first = 0; | |||||
idet->cur->interlaced_frame = 1; | |||||
}else if(idet->last_type == PROGRSSIVE){ | }else if(idet->last_type == PROGRSSIVE){ | ||||
idet->cur->video->interlaced = 0; | |||||
idet->cur->interlaced_frame = 0; | |||||
} | } | ||||
idet->prestat [ type] ++; | idet->prestat [ type] ++; | ||||
@@ -179,13 +179,13 @@ static void filter(AVFilterContext *ctx) | |||||
av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type)); | av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type)); | ||||
} | } | ||||
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) | |||||
static int filter_frame(AVFilterLink *link, AVFrame *picref) | |||||
{ | { | ||||
AVFilterContext *ctx = link->dst; | AVFilterContext *ctx = link->dst; | ||||
IDETContext *idet = ctx->priv; | IDETContext *idet = ctx->priv; | ||||
if (idet->prev) | if (idet->prev) | ||||
avfilter_unref_buffer(idet->prev); | |||||
av_frame_free(&idet->prev); | |||||
idet->prev = idet->cur; | idet->prev = idet->cur; | ||||
idet->cur = idet->next; | idet->cur = idet->next; | ||||
idet->next = picref; | idet->next = picref; | ||||
@@ -194,7 +194,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) | |||||
return 0; | return 0; | ||||
if (!idet->prev) | if (!idet->prev) | ||||
idet->prev = avfilter_ref_buffer(idet->cur, ~0); | |||||
idet->prev = av_frame_clone(idet->cur); | |||||
if (!idet->csp) | if (!idet->csp) | ||||
idet->csp = av_pix_fmt_desc_get(link->format); | idet->csp = av_pix_fmt_desc_get(link->format); | ||||
@@ -203,7 +203,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) | |||||
filter(ctx); | filter(ctx); | ||||
return ff_filter_frame(ctx->outputs[0], avfilter_ref_buffer(idet->cur, ~0)); | |||||
return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur)); | |||||
} | } | ||||
static int request_frame(AVFilterLink *link) | static int request_frame(AVFilterLink *link) | ||||
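idet keeps a sliding prev/cur/next window, so it must not give its only reference away: ff_filter_frame() takes ownership of the frame it is handed, hence the av_frame_clone(idet->cur) in the hunk above. Reduced to a single history slot, the pattern looks like this (names illustrative):

    #include <libavutil/frame.h>

    /* Keep 'frame' for temporal analysis and return a second
     * reference that the caller may pass downstream and lose. */
    static AVFrame *emit_and_keep(AVFrame **slot, AVFrame *frame)
    {
        av_frame_free(slot);        /* drop the previous reference */
        *slot = frame;              /* keep the new one */
        return av_frame_clone(frame);
    }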
@@ -238,9 +238,9 @@ static av_cold void uninit(AVFilterContext *ctx) | |||||
idet->poststat[UNDETERMINED] | idet->poststat[UNDETERMINED] | ||||
); | ); | ||||
avfilter_unref_bufferp(&idet->prev); | |||||
avfilter_unref_bufferp(&idet->cur ); | |||||
avfilter_unref_bufferp(&idet->next); | |||||
av_frame_free(&idet->prev); | |||||
av_frame_free(&idet->cur ); | |||||
av_frame_free(&idet->next); | |||||
} | } | ||||
static int query_formats(AVFilterContext *ctx) | static int query_formats(AVFilterContext *ctx) | ||||
@@ -160,19 +160,19 @@ static void interleave(uint8_t *dst, uint8_t *src, int w, int h, | |||||
} | } | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) | |||||
{ | { | ||||
IlContext *il = inlink->dst->priv; | IlContext *il = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
int ret, comp; | int ret, comp; | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&inpicref); | |||||
av_frame_free(&inpicref); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, inpicref); | |||||
av_frame_copy_props(out, inpicref); | |||||
interleave(out->data[0], inpicref->data[0], | interleave(out->data[0], inpicref->data[0], | ||||
il->linesize[0], inlink->h, | il->linesize[0], inlink->h, | ||||
@@ -195,7 +195,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) | |||||
} | } | ||||
ret = ff_filter_frame(outlink, out); | ret = ff_filter_frame(outlink, out); | ||||
avfilter_unref_bufferp(&inpicref); | |||||
av_frame_free(&inpicref); | |||||
return ret; | return ret; | ||||
} | } | ||||
@@ -116,11 +116,11 @@ static int config_props(AVFilterLink *inlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) | |||||
{ | { | ||||
KerndeintContext *kerndeint = inlink->dst->priv; | KerndeintContext *kerndeint = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *outpic; | |||||
AVFrame *outpic; | |||||
const uint8_t *prvp; ///< Previous field's pixel line number n | const uint8_t *prvp; ///< Previous field's pixel line number n | ||||
const uint8_t *prvpp; ///< Previous field's pixel line number (n - 1) | const uint8_t *prvpp; ///< Previous field's pixel line number (n - 1) | ||||
const uint8_t *prvpn; ///< Previous field's pixel line number (n + 1) | const uint8_t *prvpn; ///< Previous field's pixel line number (n + 1) | ||||
@@ -154,13 +154,13 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
const int is_packed_rgb = kerndeint->is_packed_rgb; | const int is_packed_rgb = kerndeint->is_packed_rgb; | ||||
outpic = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); | |||||
outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!outpic) { | if (!outpic) { | ||||
avfilter_unref_bufferp(&inpic); | |||||
av_frame_free(&inpic); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(outpic, inpic); | |||||
outpic->video->interlaced = 0; | |||||
av_frame_copy_props(outpic, inpic); | |||||
outpic->interlaced_frame = 0; | |||||
for (plane = 0; inpic->data[plane] && plane < 4; plane++) { | for (plane = 0; inpic->data[plane] && plane < 4; plane++) { | ||||
h = plane == 0 ? inlink->h : inlink->h >> kerndeint->vsub; | h = plane == 0 ? inlink->h : inlink->h >> kerndeint->vsub; | ||||
@@ -295,7 +295,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h); | av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h); | ||||
} | } | ||||
avfilter_unref_buffer(inpic); | |||||
av_frame_free(&inpic); | |||||
return ff_filter_frame(outlink, outpic); | return ff_filter_frame(outlink, outpic); | ||||
} | } | ||||
@@ -305,7 +305,6 @@ static const AVFilterPad kerndeint_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.config_props = config_props, | .config_props = config_props, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -35,7 +35,7 @@ | |||||
#include "internal.h" | #include "internal.h" | ||||
#include "video.h" | #include "video.h" | ||||
static void fill_iplimage_from_picref(IplImage *img, const AVFilterBufferRef *picref, enum AVPixelFormat pixfmt) | |||||
static void fill_iplimage_from_frame(IplImage *img, const AVFrame *frame, enum AVPixelFormat pixfmt) | |||||
{ | { | ||||
IplImage *tmpimg; | IplImage *tmpimg; | ||||
int depth, channels_nb; | int depth, channels_nb; | ||||
@@ -45,18 +45,18 @@ static void fill_iplimage_from_picref(IplImage *img, const AVFilterBufferRef *pi | |||||
else if (pixfmt == AV_PIX_FMT_BGR24) { depth = IPL_DEPTH_8U; channels_nb = 3; } | else if (pixfmt == AV_PIX_FMT_BGR24) { depth = IPL_DEPTH_8U; channels_nb = 3; } | ||||
else return; | else return; | ||||
tmpimg = cvCreateImageHeader((CvSize){picref->video->w, picref->video->h}, depth, channels_nb); | |||||
tmpimg = cvCreateImageHeader((CvSize){frame->width, frame->height}, depth, channels_nb); | |||||
*img = *tmpimg; | *img = *tmpimg; | ||||
img->imageData = img->imageDataOrigin = picref->data[0]; | |||||
img->imageData = img->imageDataOrigin = frame->data[0]; | |||||
img->dataOrder = IPL_DATA_ORDER_PIXEL; | img->dataOrder = IPL_DATA_ORDER_PIXEL; | ||||
img->origin = IPL_ORIGIN_TL; | img->origin = IPL_ORIGIN_TL; | ||||
img->widthStep = picref->linesize[0]; | |||||
img->widthStep = frame->linesize[0]; | |||||
} | } | ||||
static void fill_picref_from_iplimage(AVFilterBufferRef *picref, const IplImage *img, enum AVPixelFormat pixfmt) | |||||
static void fill_frame_from_iplimage(AVFrame *frame, const IplImage *img, enum AVPixelFormat pixfmt) | |||||
{ | { | ||||
picref->linesize[0] = img->widthStep; | |||||
picref->data[0] = img->imageData; | |||||
frame->linesize[0] = img->widthStep; | |||||
frame->data[0] = img->imageData; | |||||
} | } | ||||
static int query_formats(AVFilterContext *ctx) | static int query_formats(AVFilterContext *ctx) | ||||
@@ -351,27 +351,27 @@ static av_cold void uninit(AVFilterContext *ctx) | |||||
memset(ocv, 0, sizeof(*ocv)); | memset(ocv, 0, sizeof(*ocv)); | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
OCVContext *ocv = ctx->priv; | OCVContext *ocv = ctx->priv; | ||||
AVFilterLink *outlink= inlink->dst->outputs[0]; | AVFilterLink *outlink= inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
IplImage inimg, outimg; | IplImage inimg, outimg; | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, in); | |||||
av_frame_copy_props(out, in); | |||||
fill_iplimage_from_picref(&inimg , in , inlink->format); | |||||
fill_iplimage_from_picref(&outimg, out, inlink->format); | |||||
fill_iplimage_from_frame(&inimg , in , inlink->format); | |||||
fill_iplimage_from_frame(&outimg, out, inlink->format); | |||||
ocv->end_frame_filter(ctx, &inimg, &outimg); | ocv->end_frame_filter(ctx, &inimg, &outimg); | ||||
fill_picref_from_iplimage(out, &outimg, inlink->format); | |||||
fill_frame_from_iplimage(out, &outimg, inlink->format); | |||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return ff_filter_frame(outlink, out); | return ff_filter_frame(outlink, out); | ||||
} | } | ||||
@@ -381,7 +381,6 @@ static const AVFilterPad avfilter_vf_ocv_inputs[] = { | |||||
.name = "default", | .name = "default", | ||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.min_perms = AV_PERM_READ | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -253,28 +253,28 @@ static int config_props(AVFilterLink *inlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
LutContext *lut = ctx->priv; | LutContext *lut = ctx->priv; | ||||
AVFilterLink *outlink = ctx->outputs[0]; | AVFilterLink *outlink = ctx->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
uint8_t *inrow, *outrow, *inrow0, *outrow0; | uint8_t *inrow, *outrow, *inrow0, *outrow0; | ||||
int i, j, plane; | int i, j, plane; | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, in); | |||||
av_frame_copy_props(out, in); | |||||
if (lut->is_rgb) { | if (lut->is_rgb) { | ||||
/* packed */ | /* packed */ | ||||
inrow0 = in ->data[0]; | inrow0 = in ->data[0]; | ||||
outrow0 = out->data[0]; | outrow0 = out->data[0]; | ||||
for (i = 0; i < in->video->h; i ++) { | |||||
for (i = 0; i < in->height; i ++) { | |||||
int w = inlink->w; | int w = inlink->w; | ||||
const uint8_t (*tab)[256] = (const uint8_t (*)[256])lut->lut; | const uint8_t (*tab)[256] = (const uint8_t (*)[256])lut->lut; | ||||
inrow = inrow0; | inrow = inrow0; | ||||
@@ -305,7 +305,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
inrow = in ->data[plane]; | inrow = in ->data[plane]; | ||||
outrow = out->data[plane]; | outrow = out->data[plane]; | ||||
for (i = 0; i < (in->video->h + (1<<vsub) - 1)>>vsub; i ++) { | |||||
for (i = 0; i < (in->height + (1<<vsub) - 1)>>vsub; i ++) { | |||||
const uint8_t *tab = lut->lut[plane]; | const uint8_t *tab = lut->lut[plane]; | ||||
int w = (inlink->w + (1<<hsub) - 1)>>hsub; | int w = (inlink->w + (1<<hsub) - 1)>>hsub; | ||||
for (j = 0; j < w; j++) | for (j = 0; j < w; j++) | ||||
@@ -316,7 +316,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in) | |||||
} | } | ||||
} | } | ||||
avfilter_unref_bufferp(&in); | |||||
av_frame_free(&in); | |||||
return ff_filter_frame(outlink, out); | return ff_filter_frame(outlink, out); | ||||
} | } | ||||
@@ -325,7 +325,7 @@ static const AVFilterPad inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.config_props = config_props, | .config_props = config_props, | ||||
.min_perms = AV_PERM_READ, }, | |||||
}, | |||||
{ .name = NULL} | { .name = NULL} | ||||
}; | }; | ||||
static const AVFilterPad outputs[] = { | static const AVFilterPad outputs[] = { | ||||
@@ -536,45 +536,38 @@ mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgty | |||||
return mpi; | return mpi; | ||||
} | } | ||||
static void dummy_free(void *opaque, uint8_t *data){} | |||||
int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){ | int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){ | ||||
MPContext *m= (void*)vf; | MPContext *m= (void*)vf; | ||||
AVFilterLink *outlink = m->avfctx->outputs[0]; | AVFilterLink *outlink = m->avfctx->outputs[0]; | ||||
AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer)); | |||||
AVFilterBufferRef *picref = av_mallocz(sizeof(AVFilterBufferRef)); | |||||
AVFrame *picref = av_frame_alloc(); | |||||
int i; | int i; | ||||
av_assert0(vf->next); | av_assert0(vf->next); | ||||
av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n"); | av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n"); | ||||
if (!pic || !picref) | |||||
if (!picref) | |||||
goto fail; | goto fail; | ||||
picref->buf = pic; | |||||
picref->buf->free= (void*)av_free; | |||||
if (!(picref->video = av_mallocz(sizeof(AVFilterBufferRefVideoProps)))) | |||||
goto fail; | |||||
pic->w = picref->video->w = mpi->w; | |||||
pic->h = picref->video->h = mpi->h; | |||||
/* make sure the buffer gets read permission or it's useless for output */ | |||||
picref->perms = AV_PERM_READ | AV_PERM_REUSE2; | |||||
// av_assert0(mpi->flags&MP_IMGFLAG_READABLE); | |||||
if(!(mpi->flags&MP_IMGFLAG_PRESERVE)) | |||||
picref->perms |= AV_PERM_WRITE; | |||||
picref->width = mpi->w; | |||||
picref->height = mpi->h; | |||||
pic->refcount = 1; | |||||
picref->type = AVMEDIA_TYPE_VIDEO; | picref->type = AVMEDIA_TYPE_VIDEO; | ||||
for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++); | for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++); | ||||
pic->format = picref->format = conversion_map[i].pix_fmt; | |||||
picref->format = conversion_map[i].pix_fmt; | |||||
memcpy(pic->data, mpi->planes, FFMIN(sizeof(pic->data) , sizeof(mpi->planes))); | |||||
memcpy(pic->linesize, mpi->stride, FFMIN(sizeof(pic->linesize), sizeof(mpi->stride))); | |||||
memcpy(picref->data, pic->data, sizeof(picref->data)); | |||||
memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize)); | |||||
memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride))); | |||||
for(i=0; i<4 && mpi->stride[i]; i++){ | |||||
picref->buf[i] = av_buffer_create(mpi->planes[i], mpi->stride[i], dummy_free, NULL, | |||||
(mpi->flags & MP_IMGFLAG_PRESERVE) ? AV_BUFFER_FLAG_READONLY : 0); | |||||
if (!picref->buf[i]) | |||||
goto fail; | |||||
picref->data[i] = picref->buf[i]->data; | |||||
} | |||||
if(pts != MP_NOPTS_VALUE) | if(pts != MP_NOPTS_VALUE) | ||||
picref->pts= pts * av_q2d(outlink->time_base); | picref->pts= pts * av_q2d(outlink->time_base); | ||||
@@ -584,10 +577,7 @@ int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){ | |||||
return 1; | return 1; | ||||
fail: | fail: | ||||
if (picref && picref->video) | |||||
av_free(picref->video); | |||||
av_free(picref); | |||||
av_free(pic); | |||||
av_frame_free(&picref); | |||||
return 0; | return 0; | ||||
} | } | ||||
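In the vf_mp glue the old AVFilterBuffer/AVFilterBufferRef pair collapses into av_buffer_create() with a no-op free callback: lavfi gets refcounted planes while mplayer keeps owning the pixels (that is what dummy_free() above is for). The same trick works for any caller-owned memory — the owner must simply outlive the last reference:

    #include <libavutil/buffer.h>

    static void no_free(void *opaque, uint8_t *data)
    {
        /* memory belongs to someone else; nothing to release */
    }

    /* Wrap an external buffer for refcounted consumers. */
    static AVBufferRef *wrap_external(uint8_t *data, int size, int read_only)
    {
        return av_buffer_create(data, size, no_free, NULL,
                                read_only ? AV_BUFFER_FLAG_READONLY : 0);
    }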
@@ -793,12 +783,12 @@ static int request_frame(AVFilterLink *outlink) | |||||
return ret; | return ret; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic) | |||||
{ | { | ||||
MPContext *m = inlink->dst->priv; | MPContext *m = inlink->dst->priv; | ||||
int i; | int i; | ||||
double pts= MP_NOPTS_VALUE; | double pts= MP_NOPTS_VALUE; | ||||
mp_image_t* mpi = ff_new_mp_image(inpic->video->w, inpic->video->h); | |||||
mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height); | |||||
if(inpic->pts != AV_NOPTS_VALUE) | if(inpic->pts != AV_NOPTS_VALUE) | ||||
pts= inpic->pts / av_q2d(inlink->time_base); | pts= inpic->pts / av_q2d(inlink->time_base); | ||||
@@ -813,12 +803,12 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpic) | |||||
// mpi->flags|=MP_IMGFLAG_ALLOCATED; ? | // mpi->flags|=MP_IMGFLAG_ALLOCATED; ? | ||||
mpi->flags |= MP_IMGFLAG_READABLE; | mpi->flags |= MP_IMGFLAG_READABLE; | ||||
if(!(inpic->perms & AV_PERM_WRITE)) | |||||
if(!av_frame_is_writable(inpic)) | |||||
mpi->flags |= MP_IMGFLAG_PRESERVE; | mpi->flags |= MP_IMGFLAG_PRESERVE; | ||||
if(m->vf.put_image(&m->vf, mpi, pts) == 0){ | if(m->vf.put_image(&m->vf, mpi, pts) == 0){ | ||||
av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n"); | av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n"); | ||||
}else{ | }else{ | ||||
avfilter_unref_buffer(inpic); | |||||
av_frame_free(&inpic); | |||||
} | } | ||||
ff_free_mp_image(mpi); | ff_free_mp_image(mpi); | ||||
return 0; | return 0; | ||||
@@ -830,7 +820,6 @@ static const AVFilterPad mp_inputs[] = { | |||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.filter_frame = filter_frame, | .filter_frame = filter_frame, | ||||
.config_props = config_inprops, | .config_props = config_inprops, | ||||
.min_perms = AV_PERM_READ, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
@@ -298,22 +298,22 @@ static void noise(uint8_t *dst, const uint8_t *src, | |||||
n->param[comp].shiftptr = 0; | n->param[comp].shiftptr = 0; | ||||
} | } | ||||
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) | |||||
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) | |||||
{ | { | ||||
NoiseContext *n = inlink->dst->priv; | NoiseContext *n = inlink->dst->priv; | ||||
AVFilterLink *outlink = inlink->dst->outputs[0]; | AVFilterLink *outlink = inlink->dst->outputs[0]; | ||||
AVFilterBufferRef *out; | |||||
AVFrame *out; | |||||
int ret, i; | int ret, i; | ||||
if (inpicref->perms & AV_PERM_WRITE) { | |||||
if (av_frame_is_writable(inpicref)) { | |||||
out = inpicref; | out = inpicref; | ||||
} else { | } else { | ||||
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); | |||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |||||
if (!out) { | if (!out) { | ||||
avfilter_unref_bufferp(&inpicref); | |||||
av_frame_free(&inpicref); | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
} | } | ||||
avfilter_copy_buffer_ref_props(out, inpicref); | |||||
av_frame_copy_props(out, inpicref); | |||||
} | } | ||||
for (i = 0; i < n->nb_planes; i++) | for (i = 0; i < n->nb_planes; i++) | ||||
@@ -322,7 +322,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) | |||||
ret = ff_filter_frame(outlink, out); | ret = ff_filter_frame(outlink, out); | ||||
if (inpicref != out) | if (inpicref != out) | ||||
avfilter_unref_buffer(inpicref); | |||||
av_frame_free(&inpicref); | |||||
return ret; | return ret; | ||||
} | } | ||||
@@ -85,7 +85,7 @@ typedef struct { | |||||
uint8_t overlay_has_alpha; | uint8_t overlay_has_alpha; | ||||
enum OverlayFormat { OVERLAY_FORMAT_YUV420, OVERLAY_FORMAT_YUV444, OVERLAY_FORMAT_RGB, OVERLAY_FORMAT_NB} format; | enum OverlayFormat { OVERLAY_FORMAT_YUV420, OVERLAY_FORMAT_YUV444, OVERLAY_FORMAT_RGB, OVERLAY_FORMAT_NB} format; | ||||
AVFilterBufferRef *overpicref; | |||||
AVFrame *overpicref; | |||||
struct FFBufQueue queue_main; | struct FFBufQueue queue_main; | ||||
struct FFBufQueue queue_over; | struct FFBufQueue queue_over; | ||||
@@ -143,7 +143,7 @@ static av_cold void uninit(AVFilterContext *ctx) | |||||
av_opt_free(over); | av_opt_free(over); | ||||
avfilter_unref_bufferp(&over->overpicref); | |||||
av_frame_free(&over->overpicref); | |||||
ff_bufqueue_discard_all(&over->queue_main); | ff_bufqueue_discard_all(&over->queue_main); | ||||
ff_bufqueue_discard_all(&over->queue_over); | ff_bufqueue_discard_all(&over->queue_over); | ||||
} | } | ||||
@@ -316,15 +316,15 @@ static int config_output(AVFilterLink *outlink) | |||||
* Blend image in src to destination buffer dst at position (x, y). | * Blend image in src to destination buffer dst at position (x, y). | ||||
*/ | */ | ||||
static void blend_image(AVFilterContext *ctx, | static void blend_image(AVFilterContext *ctx, | ||||
AVFilterBufferRef *dst, AVFilterBufferRef *src, | |||||
AVFrame *dst, AVFrame *src, | |||||
int x, int y) | int x, int y) | ||||
{ | { | ||||
OverlayContext *over = ctx->priv; | OverlayContext *over = ctx->priv; | ||||
int i, imax, j, jmax, k, kmax; | int i, imax, j, jmax, k, kmax; | ||||
const int src_w = src->video->w; | |||||
const int src_h = src->video->h; | |||||
const int dst_w = dst->video->w; | |||||
const int dst_h = dst->video->h; | |||||
const int src_w = src->width; | |||||
const int src_h = src->height; | |||||
const int dst_w = dst->width; | |||||
const int dst_h = dst->height; | |||||
if (x >= dst_w || x+dst_w < 0 || | if (x >= dst_w || x+dst_w < 0 || | ||||
y >= dst_h || y+dst_h < 0) | y >= dst_h || y+dst_h < 0) | ||||
@@ -503,11 +503,11 @@ static void blend_image(AVFilterContext *ctx, | |||||
} | } | ||||
} | } | ||||
static int try_filter_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) | |||||
static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic) | |||||
{ | { | ||||
OverlayContext *over = ctx->priv; | OverlayContext *over = ctx->priv; | ||||
AVFilterLink *outlink = ctx->outputs[0]; | AVFilterLink *outlink = ctx->outputs[0]; | ||||
AVFilterBufferRef *next_overpic; | |||||
AVFrame *next_overpic; | |||||
int ret; | int ret; | ||||
/* Discard obsolete overlay frames: if there is a next overlay frame with pts | /* Discard obsolete overlay frames: if there is a next overlay frame with pts | ||||
@@ -518,7 +518,7 @@ static int try_filter_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) | |||||
mainpic->pts , ctx->inputs[MAIN]->time_base) > 0) | mainpic->pts , ctx->inputs[MAIN]->time_base) > 0) | ||||
break; | break; | ||||
ff_bufqueue_get(&over->queue_over); | ff_bufqueue_get(&over->queue_over); | ||||
avfilter_unref_buffer(over->overpicref); | |||||
av_frame_free(&over->overpicref); | |||||
over->overpicref = next_overpic; | over->overpicref = next_overpic; | ||||
} | } | ||||
@@ -549,7 +549,7 @@ static int try_filter_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic) | |||||
static int try_filter_next_frame(AVFilterContext *ctx) | static int try_filter_next_frame(AVFilterContext *ctx) | ||||
{ | { | ||||
OverlayContext *over = ctx->priv; | OverlayContext *over = ctx->priv; | ||||
AVFilterBufferRef *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0); | |||||
AVFrame *next_mainpic = ff_bufqueue_peek(&over->queue_main, 0); | |||||
int ret; | int ret; | ||||
if (!next_mainpic) | if (!next_mainpic) | ||||
@@ -568,7 +568,7 @@ static int flush_frames(AVFilterContext *ctx) | |||||
return ret == AVERROR(EAGAIN) ? 0 : ret; | return ret == AVERROR(EAGAIN) ? 0 : ret; | ||||
} | } | ||||
static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref) | |||||
static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
OverlayContext *over = ctx->priv; | OverlayContext *over = ctx->priv; | ||||
@@ -589,7 +589,7 @@ static int filter_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int filter_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref) | |||||
static int filter_frame_over(AVFilterLink *inlink, AVFrame *inpicref) | |||||
{ | { | ||||
AVFilterContext *ctx = inlink->dst; | AVFilterContext *ctx = inlink->dst; | ||||
OverlayContext *over = ctx->priv; | OverlayContext *over = ctx->priv; | ||||
@@ -639,14 +639,13 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = { | |||||
.get_video_buffer = ff_null_get_video_buffer, | .get_video_buffer = ff_null_get_video_buffer, | ||||
.config_props = config_input_main, | .config_props = config_input_main, | ||||
.filter_frame = filter_frame_main, | .filter_frame = filter_frame_main, | ||||
.min_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE, | |||||
.needs_writable = 1, | |||||
}, | }, | ||||
{ | { | ||||
.name = "overlay", | .name = "overlay", | ||||
.type = AVMEDIA_TYPE_VIDEO, | .type = AVMEDIA_TYPE_VIDEO, | ||||
.config_props = config_input_overlay, | .config_props = config_input_overlay, | ||||
.filter_frame = filter_frame_over, | .filter_frame = filter_frame_over, | ||||
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE, | |||||
}, | }, | ||||
{ NULL } | { NULL } | ||||
}; | }; | ||||
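The overlay pads show the filter-facing half of the permission overhaul: the .min_perms masks disappear entirely and the one remaining knob is .needs_writable (read access is implicit). A pad declaration in the new style — names and callbacks here are placeholders, not from this patch:

    static const AVFilterPad example_inputs[] = {
        {
            .name           = "default",
            .type           = AVMEDIA_TYPE_VIDEO,
            .filter_frame   = filter_frame,
            .needs_writable = 1, /* stands in for AV_PERM_WRITE in min_perms */
        },
        { NULL }
    };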
@@ -238,98 +238,126 @@ static int config_output(AVFilterLink *outlink) | |||||
return 0; | return 0; | ||||
} | } | ||||
static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int w, int h) | |||||
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) | |||||
{ | { | ||||
PadContext *pad = inlink->dst->priv; | PadContext *pad = inlink->dst->priv; | ||||
int align = (perms&AV_PERM_ALIGN) ? AVFILTER_ALIGN : 1; | |||||
AVFilterBufferRef *picref = ff_get_video_buffer(inlink->dst->outputs[0], perms, | |||||
w + (pad->w - pad->in_w) + 4*align, | |||||
h + (pad->h - pad->in_h)); | |||||
AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0], | |||||
w + (pad->w - pad->in_w), | |||||
h + (pad->h - pad->in_h)); | |||||
int plane; | int plane; | ||||
if (!picref) | |||||
if (!frame) | |||||
return NULL; | return NULL; | ||||
picref->video->w = w; | |||||
picref->video->h = h; | |||||
frame->width = w; | |||||
frame->height = h; | |||||
for (plane = 0; plane < 4 && picref->data[plane]; plane++) | |||||
picref->data[plane] += FFALIGN(pad->x >> pad->draw.hsub[plane], align) * pad->draw.pixelstep[plane] + | |||||
(pad->y >> pad->draw.vsub[plane]) * picref->linesize[plane]; | |||||
for (plane = 0; plane < 4 && frame->data[plane]; plane++) { | |||||
int hsub = pad->draw.hsub[plane]; | |||||
int vsub = pad->draw.vsub[plane]; | |||||
frame->data[plane] += (pad->x >> hsub) * pad->draw.pixelstep[plane] + | |||||
(pad->y >> vsub) * frame->linesize[plane]; | |||||
} | |||||
return picref; | |||||
return frame; | |||||
} | } | ||||
static int does_clip(PadContext *pad, AVFilterBufferRef *outpicref, int plane, int hsub, int vsub, int x, int y) | |||||
/* check whether each plane in this buffer can be padded without copying */ | |||||
static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf) | |||||
{ | { | ||||
int64_t x_in_buf, y_in_buf; | |||||
int planes[4] = { -1, -1, -1, -1}, *p = planes; | |||||
int i, j; | |||||
x_in_buf = outpicref->data[plane] - outpicref->buf->data[plane] | |||||
+ (x >> hsub) * pad->draw.pixelstep[plane] | |||||
+ (y >> vsub) * outpicref->linesize[plane]; | |||||
/* get all planes in this buffer */ | |||||
for (i = 0; i < FF_ARRAY_ELEMS(planes) && frame->data[i]; i++) { | |||||
if (av_frame_get_plane_buffer(frame, i) == buf) | |||||
*p++ = i; | |||||
} | |||||
if(x_in_buf < 0 || x_in_buf % pad->draw.pixelstep[plane]) | |||||
return 1; | |||||
x_in_buf /= pad->draw.pixelstep[plane]; | |||||
/* for each plane in this buffer, check that it can be padded without | |||||
* going over buffer bounds or other planes */ | |||||
for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) { | |||||
int hsub = s->draw.hsub[planes[i]]; | |||||
int vsub = s->draw.vsub[planes[i]]; | |||||
uint8_t *start = frame->data[planes[i]]; | |||||
uint8_t *end = start + (frame->height >> hsub) * | |||||
frame->linesize[planes[i]]; | |||||
/* amount of free space needed before the start and after the end | |||||
* of the plane */ | |||||
ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] + | |||||
(s->y >> vsub) * frame->linesize[planes[i]]; | |||||
ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) * | |||||
s->draw.pixelstep[planes[i]] + | |||||
(s->y >> vsub) * frame->linesize[planes[i]]; | |||||
if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]]) | |||||
return 1; | |||||
if (start - buf->data < req_start || | |||||
(buf->data + buf->size) - end < req_end) | |||||
return 1; | |||||
#define SIGN(x) ((x) > 0 ? 1 : -1) | |||||
for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) { | |||||
+            int hsub1 = s->draw.hsub[planes[j]];
+            uint8_t *start1 = frame->data[planes[j]];
+            uint8_t *end1   = start1 + (frame->height >> hsub1) *
+                                       frame->linesize[planes[j]];
+            if (i == j)
+                continue;
+
+            if (SIGN(start - end1) != SIGN(start - end1 - req_start) ||
+                SIGN(end - start1) != SIGN(end - start1 + req_end))
+                return 1;
+        }
+    }

-    av_assert0(outpicref->buf->linesize[plane]>0); //while reference can use negative linesize the main buffer should not
+    return 0;
+}

-    y_in_buf = x_in_buf / outpicref->buf->linesize[plane];
-    x_in_buf %= outpicref->buf->linesize[plane];
+static int frame_needs_copy(PadContext *s, AVFrame *frame)
+{
+    int i;

-    if(     y_in_buf<<vsub >= outpicref->buf->h
-         || x_in_buf<<hsub >= outpicref->buf->w)
+    if (!av_frame_is_writable(frame))
         return 1;
+
+    for (i = 0; i < 4 && frame->buf[i]; i++)
+        if (buffer_needs_copy(s, frame, frame->buf[i]))
+            return 1;
     return 0;
 }
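
Note: the AV_PERM_* permission flags are replaced by buffer refcounting, so the generic question "may I scribble on this frame?" becomes av_frame_is_writable(), with av_frame_make_writable() as the copy-on-demand fallback. A minimal sketch of that idiom (standalone example, not part of the patch; process_in_place is a hypothetical name):

    #include <libavutil/frame.h>

    static int process_in_place(AVFrame *frame)
    {
        /* writable == no other reference shares these buffers */
        if (!av_frame_is_writable(frame)) {
            /* allocates private buffers, copies data and properties */
            int ret = av_frame_make_writable(frame);
            if (ret < 0)
                return ret;
        }
        frame->data[0][0] ^= 0xff; /* now safe to modify pixels */
        return 0;
    }

pad cannot rely on av_frame_make_writable() alone because it also needs headroom around each plane, hence the bounds arithmetic in buffer_needs_copy() above.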
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     PadContext *pad = inlink->dst->priv;
-    AVFilterBufferRef *out = avfilter_ref_buffer(in, ~0);
-    int plane, needs_copy;
-
-    if (!out) {
-        avfilter_unref_bufferp(&in);
-        return AVERROR(ENOMEM);
-    }
-
-    for (plane = 0; plane < 4 && out->data[plane] && pad->draw.pixelstep[plane]; plane++) {
-        int hsub = pad->draw.hsub[plane];
-        int vsub = pad->draw.vsub[plane];
-
-        av_assert0(out->buf->w > 0 && out->buf->h > 0);
-
-        if (out->format != out->buf->format) //unsupported currently
-            break;
-
-        out->data[plane] -= (pad->x >> hsub) * pad->draw.pixelstep[plane] +
-                            (pad->y >> vsub) * out->linesize[plane];
-
-        if (does_clip(pad, out, plane, hsub, vsub, 0, 0) ||
-            does_clip(pad, out, plane, hsub, vsub, 0, pad->h - 1) ||
-            does_clip(pad, out, plane, hsub, vsub, pad->w - 1, 0) ||
-            does_clip(pad, out, plane, hsub, vsub, pad->w - 1, pad->h - 1))
-            break;
-    }
-    needs_copy = plane < 4 && out->data[plane] || !(out->perms & AV_PERM_WRITE);
+    AVFrame *out;
+    int needs_copy = frame_needs_copy(pad, in);
+
     if (needs_copy) {
         av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n");
-        avfilter_unref_buffer(out);
-        out = ff_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES,
+        out = ff_get_video_buffer(inlink->dst->outputs[0],
                                   FFMAX(inlink->w, pad->w),
                                   FFMAX(inlink->h, pad->h));
         if (!out) {
-            avfilter_unref_bufferp(&in);
+            av_frame_free(&in);
             return AVERROR(ENOMEM);
         }

-        avfilter_copy_buffer_ref_props(out, in);
-    }
+        av_frame_copy_props(out, in);
+    } else {
+        int i;

-    out->video->w = pad->w;
-    out->video->h = pad->h;
+        out = in;
+        for (i = 0; i < 4 && out->data[i]; i++) {
+            int hsub = pad->draw.hsub[i];
+            int vsub = pad->draw.vsub[i];
+            out->data[i] -= (pad->x >> hsub) * pad->draw.pixelstep[i] +
+                            (pad->y >> vsub) * out->linesize[i];
+        }
+    }

     /* top bar */
     if (pad->y) {
@@ -347,20 +375,24 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
     /* left border */
     ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize,
-                      0, pad->y, pad->x, in->video->h);
+                      0, pad->y, pad->x, in->height);

     if (needs_copy) {
         ff_copy_rectangle2(&pad->draw,
                            out->data, out->linesize, in->data, in->linesize,
-                           pad->x, pad->y, 0, 0, in->video->w, in->video->h);
+                           pad->x, pad->y, 0, 0, in->width, in->height);
     }

     /* right border */
     ff_fill_rectangle(&pad->draw, &pad->color, out->data, out->linesize,
                       pad->x + pad->in_w, pad->y, pad->w - pad->x - pad->in_w,
-                      in->video->h);
+                      in->height);
+
+    out->width  = pad->w;
+    out->height = pad->h;

-    avfilter_unref_bufferp(&in);
+    if (in != out)
+        av_frame_free(&in);
     return ff_filter_frame(inlink->dst->outputs[0], out);
 }
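
Note: the else branch above pads in place by walking each plane pointer up and left inside the existing allocation, which is why buffer_needs_copy() had to verify the headroom first. A sketch of the arithmetic, assuming a frame whose buffer is already known to be large enough (shift_planes is a hypothetical helper; FFDrawContext is the lavfi drawutils context pad already uses):

    static void shift_planes(AVFrame *f, const FFDrawContext *draw,
                             int pad_x, int pad_y)
    {
        int i;
        for (i = 0; i < 4 && f->data[i]; i++) {
            int hsub = draw->hsub[i], vsub = draw->vsub[i];
            /* x advances by the plane's pixel step, y by its linesize;
             * chroma planes use subsampled coordinates */
            f->data[i] -= (pad_x >> hsub) * draw->pixelstep[i] +
                          (pad_y >> vsub) * f->linesize[i];
        }
    }

For yuv420p with pad_x=16 and pad_y=8, the luma pointer moves back by 16 + 8*linesize[0] bytes and each chroma pointer by 8 + 4*linesize[i] bytes.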
@@ -52,21 +52,20 @@ static int config_props(AVFilterLink *inlink)
     return 0;
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     PixdescTestContext *priv = inlink->dst->priv;
     AVFilterLink *outlink    = inlink->dst->outputs[0];
-    AVFilterBufferRef *out;
+    AVFrame *out;
     int i, c, w = inlink->w, h = inlink->h;

-    out = ff_get_video_buffer(outlink, AV_PERM_WRITE,
-                              outlink->w, outlink->h);
+    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
-        avfilter_unref_bufferp(&in);
+        av_frame_free(&in);
         return AVERROR(ENOMEM);
     }

-    avfilter_copy_buffer_ref_props(out, in);
+    av_frame_copy_props(out, in);

     for (i = 0; i < 4; i++) {
         int h = outlink->h;
@@ -102,7 +101,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
         }
     }

-    avfilter_unref_bufferp(&in);
+    av_frame_free(&in);
     return ff_filter_frame(outlink, out);
 }
@@ -112,7 +111,6 @@ static const AVFilterPad avfilter_vf_pixdesctest_inputs[] = {
         .type          = AVMEDIA_TYPE_VIDEO,
         .filter_frame  = filter_frame,
         .config_props  = config_props,
-        .min_perms     = AV_PERM_READ,
     },
     { NULL }
 };
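
Note: pixdesctest shows the conversion pattern in its purest form, repeated across nearly every filter in this merge: ff_get_video_buffer() loses its permission argument, avfilter_copy_buffer_ref_props() becomes av_frame_copy_props(), avfilter_unref_bufferp() becomes av_frame_free(), and .min_perms disappears from the pads. Condensed skeleton of a post-switch filter_frame() (sketch only, the processing step is elided):

    static int filter_frame(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];
        AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);

        if (!out) {
            av_frame_free(&in);       /* drops the reference, frees the frame */
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in); /* pts, SAR, interlacing flags, ... */

        /* ... compute out from in ... */

        av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    }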
@@ -100,32 +100,32 @@ static int pp_config_props(AVFilterLink *inlink)
     return 0;
 }

-static int pp_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inbuf)
+static int pp_filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
 {
     AVFilterContext *ctx = inlink->dst;
     PPFilterContext *pp = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
     const int aligned_w = FFALIGN(outlink->w, 8);
     const int aligned_h = FFALIGN(outlink->h, 8);
-    AVFilterBufferRef *outbuf;
+    AVFrame *outbuf;

-    outbuf = ff_get_video_buffer(outlink, AV_PERM_WRITE, aligned_w, aligned_h);
+    outbuf = ff_get_video_buffer(outlink, aligned_w, aligned_h);
     if (!outbuf) {
-        avfilter_unref_buffer(inbuf);
+        av_frame_free(&inbuf);
         return AVERROR(ENOMEM);
     }
-    avfilter_copy_buffer_ref_props(outbuf, inbuf);
+    av_frame_copy_props(outbuf, inbuf);

     pp_postprocess((const uint8_t **)inbuf->data, inbuf->linesize,
                    outbuf->data, outbuf->linesize,
                    aligned_w, outlink->h,
-                   outbuf->video->qp_table,
-                   outbuf->video->qp_table_linesize,
+                   outbuf->qscale_table,
+                   outbuf->qstride,
                    pp->modes[pp->mode_id],
                    pp->pp_ctx,
-                   outbuf->video->pict_type);
+                   outbuf->pict_type);

-    avfilter_unref_buffer(inbuf);
+    av_frame_free(&inbuf);
     return ff_filter_frame(outlink, outbuf);
 }
@@ -146,7 +146,6 @@ static const AVFilterPad pp_inputs[] = {
         .type         = AVMEDIA_TYPE_VIDEO,
         .config_props = pp_config_props,
         .filter_frame = pp_filter_frame,
-        .min_perms    = AV_PERM_READ,
     },
     { NULL }
 };
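
Note: the per-frame quantizer data moves with the switch as well: the old AVFilterBufferRefVideoProps members qp_table and qp_table_linesize map onto the AVFrame fields qscale_table and qstride, and pict_type moves from the video props to the frame itself, so the libpostproc call keeps its shape. Sketch of the call under the new API (mode and ctx are assumed to come from pp_get_mode_by_name_and_quality() and pp_get_context() elsewhere):

    #include <libpostproc/postprocess.h>

    static void pp_frame(AVFrame *in, AVFrame *out, int w, int h,
                         pp_mode *mode, pp_context *ctx)
    {
        pp_postprocess((const uint8_t **)in->data, in->linesize,
                       out->data, out->linesize, w, h,
                       out->qscale_table, /* was ->video->qp_table */
                       out->qstride,      /* was ->video->qp_table_linesize */
                       mode, ctx,
                       out->pict_type);   /* was ->video->pict_type */
    }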
@@ -473,23 +473,23 @@ static void blur_image(int ***mask,
     }
 }

-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
 {
     RemovelogoContext *removelogo = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
-    AVFilterBufferRef *outpicref;
+    AVFrame *outpicref;
     int direct = 0;

-    if (inpicref->perms & AV_PERM_WRITE) {
+    if (av_frame_is_writable(inpicref)) {
         direct = 1;
         outpicref = inpicref;
     } else {
-        outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+        outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
         if (!outpicref) {
-            avfilter_unref_bufferp(&inpicref);
+            av_frame_free(&inpicref);
             return AVERROR(ENOMEM);
         }
-        avfilter_copy_buffer_ref_props(outpicref, inpicref);
+        av_frame_copy_props(outpicref, inpicref);
     }

     blur_image(removelogo->mask,
@@ -509,7 +509,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
                inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);

     if (!direct)
-        avfilter_unref_bufferp(&inpicref);
+        av_frame_free(&inpicref);

     return ff_filter_frame(outlink, outpicref);
 }
@@ -543,7 +543,6 @@ static const AVFilterPad removelogo_inputs[] = {
         .get_video_buffer = ff_null_get_video_buffer,
         .config_props     = config_props_input,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_READ,
     },
     { NULL }
 };
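
Note: removelogo demonstrates the in-place ("direct") variant of the pattern: when av_frame_is_writable() says the input is exclusively ours it doubles as the output, otherwise a new buffer is allocated and the properties copied. Factored out, the logic looks roughly like this (get_writable_output is a hypothetical helper, not lavfi API):

    static int get_writable_output(AVFilterLink *outlink, AVFrame *in,
                                   AVFrame **out, int *direct)
    {
        *direct = av_frame_is_writable(in);
        if (*direct) {
            *out = in;
            return 0;
        }
        *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!*out)
            return AVERROR(ENOMEM); /* caller still owns and frees 'in' */
        return av_frame_copy_props(*out, in);
    }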
@@ -329,7 +329,7 @@ fail:
     return ret;
 }

-static int scale_slice(AVFilterLink *link, AVFilterBufferRef *out_buf, AVFilterBufferRef *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
+static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
 {
     ScaleContext *scale = link->dst->priv;
     const uint8_t *in[4];
@@ -353,17 +353,17 @@ static int scale_slice(AVFilterLink *link, AVFilterBufferRef *out_buf, AVFilterB
                          out,out_stride);
 }

-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
+static int filter_frame(AVFilterLink *link, AVFrame *in)
 {
     ScaleContext *scale = link->dst->priv;
     AVFilterLink *outlink = link->dst->outputs[0];
-    AVFilterBufferRef *out;
+    AVFrame *out;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
     char buf[32];

-    if(   in->video->w != link->w
-       || in->video->h != link->h
-       || in->format   != link->format) {
+    if(   in->width  != link->w
+       || in->height != link->h
+       || in->format != link->format) {
         int ret;
         snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
         av_opt_set(scale, "w", buf, 0);
@@ -371,8 +371,8 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
         av_opt_set(scale, "h", buf, 0);

         link->dst->inputs[0]->format = in->format;
-        link->dst->inputs[0]->w      = in->video->w;
-        link->dst->inputs[0]->h      = in->video->h;
+        link->dst->inputs[0]->w      = in->width;
+        link->dst->inputs[0]->h      = in->height;

         if ((ret = config_props(outlink)) < 0)
             return ret;
@@ -384,32 +384,32 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
     scale->hsub = desc->log2_chroma_w;
     scale->vsub = desc->log2_chroma_h;

-    out = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h);
+    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
-        avfilter_unref_bufferp(&in);
+        av_frame_free(&in);
         return AVERROR(ENOMEM);
     }

-    avfilter_copy_buffer_ref_props(out, in);
-    out->video->w = outlink->w;
-    out->video->h = outlink->h;
+    av_frame_copy_props(out, in);
+    out->width  = outlink->w;
+    out->height = outlink->h;

     if(scale->output_is_pal)
         avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);

-    av_reduce(&out->video->sample_aspect_ratio.num, &out->video->sample_aspect_ratio.den,
-              (int64_t)in->video->sample_aspect_ratio.num * outlink->h * link->w,
-              (int64_t)in->video->sample_aspect_ratio.den * outlink->w * link->h,
+    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
+              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
+              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
               INT_MAX);

-    if(scale->interlaced>0 || (scale->interlaced<0 && in->video->interlaced)){
+    if(scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)){
         scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
         scale_slice(link, out, in, scale->isws[1], 0, link->h   /2, 2, 1);
     }else{
         scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
     }

-    avfilter_unref_bufferp(&in);
+    av_frame_free(&in);
     return ff_filter_frame(outlink, out);
 }
@@ -418,7 +418,6 @@ static const AVFilterPad avfilter_vf_scale_inputs[] = {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
-       .min_perms    = AV_PERM_READ,
    },
    { NULL }
 };
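
Note: scale's av_reduce() call above keeps the display aspect ratio constant across the resize: DAR = (width/height) * SAR, so out_sar = in_sar * (in_w * out_h) / (in_h * out_w). As a standalone sketch (rescale_sar is a hypothetical helper):

    #include <limits.h>
    #include <libavutil/rational.h>

    static AVRational rescale_sar(AVRational in_sar,
                                  int in_w, int in_h, int out_w, int out_h)
    {
        AVRational out_sar;
        /* reduce (in_sar * in_w * out_h) / (out_w * in_h) to 32 bits */
        av_reduce(&out_sar.num, &out_sar.den,
                  (int64_t)in_sar.num * in_w * out_h,
                  (int64_t)in_sar.den * out_w * in_h,
                  INT_MAX);
        return out_sar;
    }

Halving the width of a 720x576 frame with SAR 16:15, for example, yields SAR 32:15, so the 4:3 display aspect is preserved.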