|
|
|
@@ -37,6 +37,7 @@ |
|
|
|
typedef struct AudioPhaseMeterContext { |
|
|
|
const AVClass *class; |
|
|
|
AVFrame *out; |
|
|
|
int do_video; |
|
|
|
int w, h; |
|
|
|
AVRational frame_rate; |
|
|
|
int contrast[4]; |
|
|
|
@@ -57,6 +58,7 @@ static const AVOption aphasemeter_options[] = { |
|
|
|
{ "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=7}, 0, 255, FLAGS }, |
|
|
|
{ "bc", "set blue contrast", OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=1}, 0, 255, FLAGS }, |
|
|
|
{ "mpc", "set median phase color", OFFSET(mpc_str), AV_OPT_TYPE_STRING, {.str = "none"}, 0, 0, FLAGS }, |
|
|
|
{ "video", "set video output", OFFSET(do_video), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS }, |
|
|
|
{ NULL } |
|
|
|
}; |
|
|
|
|
|
|
|
@@ -64,6 +66,7 @@ AVFILTER_DEFINE_CLASS(aphasemeter); |
|
|
|
|
|
|
|
static int query_formats(AVFilterContext *ctx) |
|
|
|
{ |
|
|
|
AudioPhaseMeterContext *s = ctx->priv; |
|
|
|
AVFilterFormats *formats = NULL; |
|
|
|
AVFilterChannelLayouts *layout = NULL; |
|
|
|
AVFilterLink *inlink = ctx->inputs[0]; |
|
|
|
@@ -74,17 +77,24 @@ static int query_formats(AVFilterContext *ctx) |
|
|
|
|
|
|
|
formats = ff_make_format_list(sample_fmts); |
|
|
|
if ((ret = ff_formats_ref (formats, &inlink->out_formats )) < 0 || |
|
|
|
(ret = ff_formats_ref (formats, &outlink->in_formats )) < 0 || |
|
|
|
(ret = ff_add_channel_layout (&layout, AV_CH_LAYOUT_STEREO )) < 0 || |
|
|
|
(ret = ff_channel_layouts_ref (layout , &inlink->out_channel_layouts)) < 0) |
|
|
|
(ret = ff_channel_layouts_ref (layout , &inlink->out_channel_layouts)) < 0 || |
|
|
|
(ret = ff_channel_layouts_ref (layout , &outlink->in_channel_layouts)) < 0) |
|
|
|
return ret; |
|
|
|
|
|
|
|
formats = ff_all_samplerates(); |
|
|
|
if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0) |
|
|
|
if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 || |
|
|
|
(ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0) |
|
|
|
return ret; |
|
|
|
|
|
|
|
formats = ff_make_format_list(pix_fmts); |
|
|
|
if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0) |
|
|
|
return ret; |
|
|
|
if (s->do_video) { |
|
|
|
AVFilterLink *outlink = ctx->outputs[1]; |
|
|
|
|
|
|
|
formats = ff_make_format_list(pix_fmts); |
|
|
|
if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0) |
|
|
|
return ret; |
|
|
|
} |
|
|
|
|
|
|
|
return 0; |
|
|
|
} |
|
|
|
@@ -95,15 +105,17 @@ static int config_input(AVFilterLink *inlink) |
|
|
|
AudioPhaseMeterContext *s = ctx->priv; |
|
|
|
int nb_samples; |
|
|
|
|
|
|
|
nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5); |
|
|
|
inlink->partial_buf_size = |
|
|
|
inlink->min_samples = |
|
|
|
inlink->max_samples = nb_samples; |
|
|
|
if (s->do_video) { |
|
|
|
nb_samples = FFMAX(1024, ((double)inlink->sample_rate / av_q2d(s->frame_rate)) + 0.5); |
|
|
|
inlink->partial_buf_size = |
|
|
|
inlink->min_samples = |
|
|
|
inlink->max_samples = nb_samples; |
|
|
|
} |
|
|
|
|
|
|
|
return 0; |
|
|
|
} |
|
|
|
|
|
|
|
static int config_output(AVFilterLink *outlink) |
|
|
|
static int config_video_output(AVFilterLink *outlink) |
|
|
|
{ |
|
|
|
AVFilterContext *ctx = outlink->src; |
|
|
|
AudioPhaseMeterContext *s = ctx->priv; |
|
|
|
@@ -131,8 +143,9 @@ static inline int get_x(float phase, int w) |
|
|
|
static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
|
|
{ |
|
|
|
AVFilterContext *ctx = inlink->dst; |
|
|
|
AVFilterLink *outlink = ctx->outputs[0]; |
|
|
|
AudioPhaseMeterContext *s = ctx->priv; |
|
|
|
AVFilterLink *outlink = s->do_video ? ctx->outputs[1] : NULL; |
|
|
|
AVFilterLink *aoutlink = ctx->outputs[0]; |
|
|
|
AVDictionary **metadata; |
|
|
|
const int rc = s->contrast[0]; |
|
|
|
const int gc = s->contrast[1]; |
|
|
|
@@ -142,8 +155,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
|
|
uint8_t *dst; |
|
|
|
int i; |
|
|
|
|
|
|
|
if (!s->out || s->out->width != outlink->w || |
|
|
|
s->out->height != outlink->h) { |
|
|
|
if (s->do_video && (!s->out || s->out->width != outlink->w || |
|
|
|
s->out->height != outlink->h)) { |
|
|
|
av_frame_free(&s->out); |
|
|
|
s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
|
|
if (!s->out) { |
|
|
|
@@ -154,7 +167,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
|
|
out = s->out; |
|
|
|
for (i = 0; i < outlink->h; i++) |
|
|
|
memset(out->data[0] + i * out->linesize[0], 0, outlink->w * 4); |
|
|
|
} else { |
|
|
|
} else if (s->do_video) { |
|
|
|
out = s->out; |
|
|
|
for (i = outlink->h - 1; i >= 10; i--) |
|
|
|
memmove(out->data[0] + (i ) * out->linesize[0], |
|
|
|
@@ -163,7 +176,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
|
|
for (i = 0; i < outlink->w; i++) |
|
|
|
AV_WL32(out->data[0] + i * 4, 0); |
|
|
|
} |
|
|
|
s->out->pts = in->pts; |
|
|
|
|
|
|
|
for (i = 0; i < in->nb_samples; i++) { |
|
|
|
const float *src = (float *)in->data[0] + i * 2; |
|
|
|
@@ -171,24 +183,28 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
|
|
const float phase = isnan(f) ? 1 : f; |
|
|
|
const int x = get_x(phase, s->w); |
|
|
|
|
|
|
|
dst = out->data[0] + x * 4; |
|
|
|
dst[0] = FFMIN(255, dst[0] + rc); |
|
|
|
dst[1] = FFMIN(255, dst[1] + gc); |
|
|
|
dst[2] = FFMIN(255, dst[2] + bc); |
|
|
|
dst[3] = 255; |
|
|
|
if (s->do_video) { |
|
|
|
dst = out->data[0] + x * 4; |
|
|
|
dst[0] = FFMIN(255, dst[0] + rc); |
|
|
|
dst[1] = FFMIN(255, dst[1] + gc); |
|
|
|
dst[2] = FFMIN(255, dst[2] + bc); |
|
|
|
dst[3] = 255; |
|
|
|
} |
|
|
|
fphase += phase; |
|
|
|
} |
|
|
|
fphase /= in->nb_samples; |
|
|
|
|
|
|
|
if (s->draw_median_phase) { |
|
|
|
dst = out->data[0] + get_x(fphase, s->w) * 4; |
|
|
|
AV_WL32(dst, AV_RL32(s->mpc)); |
|
|
|
} |
|
|
|
if (s->do_video) { |
|
|
|
if (s->draw_median_phase) { |
|
|
|
dst = out->data[0] + get_x(fphase, s->w) * 4; |
|
|
|
AV_WL32(dst, AV_RL32(s->mpc)); |
|
|
|
} |
|
|
|
|
|
|
|
for (i = 1; i < 10 && i < outlink->h; i++) |
|
|
|
memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4); |
|
|
|
for (i = 1; i < 10 && i < outlink->h; i++) |
|
|
|
memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4); |
|
|
|
} |
|
|
|
|
|
|
|
metadata = avpriv_frame_get_metadatap(out); |
|
|
|
metadata = avpriv_frame_get_metadatap(in); |
|
|
|
if (metadata) { |
|
|
|
uint8_t value[128]; |
|
|
|
|
|
|
|
@@ -196,8 +212,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
|
|
|
av_dict_set(metadata, "lavfi.aphasemeter.phase", value, 0); |
|
|
|
} |
|
|
|
|
|
|
|
av_frame_free(&in); |
|
|
|
return ff_filter_frame(outlink, av_frame_clone(s->out)); |
|
|
|
if (s->do_video) { |
|
|
|
s->out->pts = in->pts; |
|
|
|
ff_filter_frame(outlink, av_frame_clone(s->out)); |
|
|
|
} |
|
|
|
return ff_filter_frame(aoutlink, in); |
|
|
|
} |
|
|
|
|
|
|
|
static av_cold void uninit(AVFilterContext *ctx) |
|
|
|
@@ -207,6 +226,33 @@ static av_cold void uninit(AVFilterContext *ctx) |
|
|
|
av_frame_free(&s->out); |
|
|
|
} |
|
|
|
|
|
|
|
/**
 * Filter init: create the output pads at runtime.
 *
 * Pad 0 is always the pass-through audio output; pad 1 (the phase-meter
 * video) is created only when the "video" option is enabled, which is why
 * the filter declares AVFILTER_FLAG_DYNAMIC_OUTPUTS instead of a static
 * .outputs array.
 *
 * Returns 0 on success, AVERROR(ENOMEM) if a pad name cannot be allocated.
 */
static av_cold int init(AVFilterContext *ctx)
{
    AudioPhaseMeterContext *s = ctx->priv;

    /* Audio pass-through pad, always present. */
    AVFilterPad audio_pad = {
        .name = av_strdup("out0"),
        .type = AVMEDIA_TYPE_AUDIO,
    };
    if (!audio_pad.name)
        return AVERROR(ENOMEM);
    /* NOTE(review): ff_insert_outpad() can fail on allocation in some
     * libavfilter versions; its return value is not checked here —
     * confirm against the in-tree API and propagate the error if so. */
    ff_insert_outpad(ctx, 0, &audio_pad);

    /* Optional phase-meter video pad. */
    if (s->do_video) {
        AVFilterPad video_pad = {
            .name         = av_strdup("out1"),
            .type         = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video_output,
        };
        if (!video_pad.name)
            return AVERROR(ENOMEM);
        ff_insert_outpad(ctx, 1, &video_pad);
    }

    return 0;
}
|
|
|
|
|
|
|
static const AVFilterPad inputs[] = { |
|
|
|
{ |
|
|
|
.name = "default", |
|
|
|
@@ -217,22 +263,15 @@ static const AVFilterPad inputs[] = { |
|
|
|
{ NULL } |
|
|
|
}; |
|
|
|
|
|
|
|
static const AVFilterPad outputs[] = { |
|
|
|
{ |
|
|
|
.name = "default", |
|
|
|
.type = AVMEDIA_TYPE_VIDEO, |
|
|
|
.config_props = config_output, |
|
|
|
}, |
|
|
|
{ NULL } |
|
|
|
}; |
|
|
|
|
|
|
|
AVFilter ff_avf_aphasemeter = { |
|
|
|
.name = "aphasemeter", |
|
|
|
.description = NULL_IF_CONFIG_SMALL("Convert input audio to phase meter video output."), |
|
|
|
.init = init, |
|
|
|
.uninit = uninit, |
|
|
|
.query_formats = query_formats, |
|
|
|
.priv_size = sizeof(AudioPhaseMeterContext), |
|
|
|
.inputs = inputs, |
|
|
|
.outputs = outputs, |
|
|
|
.outputs = NULL, |
|
|
|
.priv_class = &aphasemeter_class, |
|
|
|
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS, |
|
|
|
}; |