@@ -24,6 +24,7 @@
 #include "libavutil/opt.h"
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
+#include "filters.h"
 #include "formats.h"
 #include "audio.h"
 #include "video.h"
@@ -36,6 +37,7 @@ typedef struct AudioBitScopeContext {
     char *colors;
 
     int nb_channels;
+    int nb_samples;
     int depth;
     uint8_t *fg;
 
@@ -91,13 +93,10 @@ static int config_input(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     AudioBitScopeContext *s = ctx->priv;
-    int ch, nb_samples;
+    int ch;
     char *colors, *saveptr = NULL;
 
-    nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
-    inlink->partial_buf_size =
-    inlink->min_samples =
-    inlink->max_samples = nb_samples;
+    s->nb_samples = FFMAX(1, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5);
     s->nb_channels = inlink->channels;
     s->depth = inlink->format == AV_SAMPLE_FMT_S16P ? 16 : 32;
 
@@ -222,12 +221,33 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
     return ff_filter_frame(outlink, outpicref);
 }
 
+static int activate(AVFilterContext *ctx)
+{
+    AVFilterLink *inlink = ctx->inputs[0];
+    AVFilterLink *outlink = ctx->outputs[0];
+    AudioBitScopeContext *s = ctx->priv;
+    AVFrame *in;
+    int ret;
+
+    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+    ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
+    if (ret < 0)
+        return ret;
+    if (ret > 0)
+        return filter_frame(inlink, in);
+
+    FF_FILTER_FORWARD_STATUS(inlink, outlink);
+    FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+    return FFERROR_NOT_READY;
+}
+
 static const AVFilterPad inputs[] = {
     {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
         .config_props = config_input,
-        .filter_frame = filter_frame,
     },
     { NULL }
 };
@@ -248,5 +268,6 @@ AVFilter ff_avf_abitscope = {
     .priv_size     = sizeof(AudioBitScopeContext),
     .inputs        = inputs,
     .outputs       = outputs,
+    .activate      = activate,
     .priv_class    = &abitscope_class,
 };