|
|
|
@@ -23,6 +23,7 @@ |
|
|
|
* audio to video multimedia filter |
|
|
|
*/ |
|
|
|
|
|
|
|
#include "libavutil/avassert.h" |
|
|
|
#include "libavutil/channel_layout.h" |
|
|
|
#include "libavutil/opt.h" |
|
|
|
#include "libavutil/parseutils.h" |
|
|
|
@@ -40,6 +41,11 @@ enum ShowWavesMode { |
|
|
|
MODE_NB, |
|
|
|
}; |
|
|
|
|
|
|
|
/* Singly linked list node used by the showwavespic variant to queue the
 * incoming audio frames until the final single picture is rendered. */
struct frame_node {
    AVFrame *frame;          /* owned reference; freed in uninit() */
    struct frame_node *next; /* NULL on the last queued frame */
};
|
|
|
|
|
|
|
typedef struct { |
|
|
|
const AVClass *class; |
|
|
|
int w, h; |
|
|
|
@@ -54,6 +60,13 @@ typedef struct { |
|
|
|
int split_channels; |
|
|
|
void (*draw_sample)(uint8_t *buf, int height, int linesize, |
|
|
|
int16_t sample, int16_t *prev_y, int intensity); |
|
|
|
|
|
|
|
/* single picture */ |
|
|
|
int single_pic; |
|
|
|
struct frame_node *audio_frames; |
|
|
|
struct frame_node *last_frame; |
|
|
|
int64_t total_samples; |
|
|
|
int64_t *sum; /* abs sum of the samples per channel */ |
|
|
|
} ShowWavesContext; |
|
|
|
|
|
|
|
#define OFFSET(x) offsetof(ShowWavesContext, x) |
|
|
|
@@ -82,6 +95,19 @@ static av_cold void uninit(AVFilterContext *ctx) |
|
|
|
|
|
|
|
av_frame_free(&showwaves->outpicref); |
|
|
|
av_freep(&showwaves->buf_idy); |
|
|
|
|
|
|
|
if (showwaves->single_pic) { |
|
|
|
struct frame_node *node = showwaves->audio_frames; |
|
|
|
while (node) { |
|
|
|
struct frame_node *tmp = node; |
|
|
|
|
|
|
|
node = node->next; |
|
|
|
av_frame_free(&tmp->frame); |
|
|
|
av_freep(&tmp); |
|
|
|
} |
|
|
|
av_freep(&showwaves->sum); |
|
|
|
showwaves->last_frame = NULL; |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
static int query_formats(AVFilterContext *ctx) |
|
|
|
@@ -162,6 +188,55 @@ inline static int push_frame(AVFilterLink *outlink) |
|
|
|
return ret; |
|
|
|
} |
|
|
|
|
|
|
|
/**
 * Render the whole queued audio stream into the single output picture.
 *
 * Each output column averages the absolute value of a run of
 * max_samples input samples (per channel), then draws it with the
 * configured draw_sample callback.
 *
 * @return the result of push_frame() on success, a negative AVERROR
 *         code on failure.
 */
static int push_single_pic(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int64_t n = 0, max_samples = showwaves->total_samples / outlink->w;
    AVFrame *out = showwaves->outpicref;
    struct frame_node *node;
    const int nb_channels = inlink->channels;
    const int x = 255 / (showwaves->split_channels ? 1 : nb_channels);
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
    const int linesize = out->linesize[0];
    int col = 0;
    int64_t *sum = showwaves->sum;

    /* BUGFIX: with fewer input samples than output columns max_samples
     * is 0 and the averaging below would divide by zero. */
    if (max_samples == 0) {
        av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples);

    /* BUGFIX: clear the whole int64_t accumulator array; the previous
     * memset(sum, 0, nb_channels) zeroed only nb_channels *bytes*,
     * leaving most of the accumulators uninitialized. */
    memset(sum, 0, nb_channels * sizeof(*sum));

    for (node = showwaves->audio_frames; node; node = node->next) {
        int i;
        const AVFrame *frame = node->frame;
        const int16_t *p = (const int16_t *)frame->data[0];

        for (i = 0; i < frame->nb_samples; i++) {
            int ch;

            /* accumulate |sample| per channel (interleaved s16 input) */
            for (ch = 0; ch < nb_channels; ch++)
                sum[ch] += abs(p[ch + i*nb_channels]) << 1;
            if (n++ == max_samples) {
                /* flush one column: draw the per-channel averages */
                for (ch = 0; ch < nb_channels; ch++) {
                    int16_t sample = sum[ch] / max_samples;
                    uint8_t *buf = out->data[0] + col;
                    if (showwaves->split_channels)
                        buf += ch*ch_height*linesize;
                    av_assert0(col < outlink->w);
                    showwaves->draw_sample(buf, ch_height, linesize, sample, &showwaves->buf_idy[ch], x);
                    sum[ch] = 0;
                }
                col++;
                n = 0;
            }
        }
    }

    return push_frame(outlink);
}
|
|
|
|
|
|
|
|
|
|
|
static int request_frame(AVFilterLink *outlink) |
|
|
|
{ |
|
|
|
ShowWavesContext *showwaves = outlink->src->priv; |
|
|
|
@@ -173,8 +248,13 @@ static int request_frame(AVFilterLink *outlink) |
|
|
|
ret = ff_request_frame(inlink); |
|
|
|
} while (!showwaves->req_fullfilled && ret >= 0); |
|
|
|
|
|
|
|
if (ret == AVERROR_EOF && showwaves->outpicref) |
|
|
|
push_frame(outlink); |
|
|
|
if (ret == AVERROR_EOF && showwaves->outpicref) { |
|
|
|
if (showwaves->single_pic) |
|
|
|
push_single_pic(outlink); |
|
|
|
else |
|
|
|
push_frame(outlink); |
|
|
|
} |
|
|
|
|
|
|
|
return ret; |
|
|
|
} |
|
|
|
|
|
|
|
@@ -229,14 +309,56 @@ static void draw_sample_cline(uint8_t *buf, int height, int linesize, |
|
|
|
buf[k * linesize] += intensity; |
|
|
|
} |
|
|
|
|
|
|
|
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
|
|
static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p, |
|
|
|
const AVFilterLink *inlink, AVFilterLink *outlink, |
|
|
|
const AVFrame *in) |
|
|
|
{ |
|
|
|
if (!showwaves->outpicref) { |
|
|
|
int j; |
|
|
|
AVFrame *out = showwaves->outpicref = |
|
|
|
ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
|
|
if (!out) |
|
|
|
return AVERROR(ENOMEM); |
|
|
|
out->width = outlink->w; |
|
|
|
out->height = outlink->h; |
|
|
|
out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels, |
|
|
|
av_make_q(1, inlink->sample_rate), |
|
|
|
outlink->time_base); |
|
|
|
for (j = 0; j < outlink->h; j++) |
|
|
|
memset(out->data[0] + j*out->linesize[0], 0, outlink->w); |
|
|
|
} |
|
|
|
return 0; |
|
|
|
} |
|
|
|
|
|
|
|
/**
 * Common init for showwaves and showwavespic.
 *
 * Selects the per-sample drawing callback from the configured mode.
 * When instantiated as "showwavespic", forces single-picture operation
 * and the centered-line drawing mode.
 */
static av_cold int init(AVFilterContext *ctx)
{
    ShowWavesContext *showwaves = ctx->priv;

    if (!strcmp(ctx->filter->name, "showwavespic")) {
        showwaves->single_pic = 1;
        showwaves->mode       = MODE_CENTERED_LINE;
    }

    if (showwaves->mode == MODE_POINT)
        showwaves->draw_sample = draw_sample_point;
    else if (showwaves->mode == MODE_LINE)
        showwaves->draw_sample = draw_sample_line;
    else if (showwaves->mode == MODE_P2P)
        showwaves->draw_sample = draw_sample_p2p;
    else if (showwaves->mode == MODE_CENTERED_LINE)
        showwaves->draw_sample = draw_sample_cline;
    else
        return AVERROR_BUG; /* mode is option-validated; anything else is a bug */

    return 0;
}
|
|
|
|
|
|
|
#if CONFIG_SHOWWAVES_FILTER |
|
|
|
|
|
|
|
static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
|
|
{ |
|
|
|
AVFilterContext *ctx = inlink->dst; |
|
|
|
AVFilterLink *outlink = ctx->outputs[0]; |
|
|
|
ShowWavesContext *showwaves = ctx->priv; |
|
|
|
const int nb_samples = insamples->nb_samples; |
|
|
|
AVFrame *outpicref = showwaves->outpicref; |
|
|
|
int linesize = outpicref ? outpicref->linesize[0] : 0; |
|
|
|
int16_t *p = (int16_t *)insamples->data[0]; |
|
|
|
int nb_channels = inlink->channels; |
|
|
|
int i, j, ret = 0; |
|
|
|
@@ -246,23 +368,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
|
|
|
|
|
|
/* draw data in the buffer */ |
|
|
|
for (i = 0; i < nb_samples; i++) { |
|
|
|
if (!showwaves->outpicref) { |
|
|
|
showwaves->outpicref = outpicref = |
|
|
|
ff_get_video_buffer(outlink, outlink->w, outlink->h); |
|
|
|
if (!outpicref) |
|
|
|
return AVERROR(ENOMEM); |
|
|
|
outpicref->width = outlink->w; |
|
|
|
outpicref->height = outlink->h; |
|
|
|
outpicref->pts = insamples->pts + |
|
|
|
av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels, |
|
|
|
(AVRational){ 1, inlink->sample_rate }, |
|
|
|
outlink->time_base); |
|
|
|
linesize = outpicref->linesize[0]; |
|
|
|
for (j = 0; j < outlink->h; j++) |
|
|
|
memset(outpicref->data[0] + j * linesize, 0, outlink->w); |
|
|
|
} |
|
|
|
|
|
|
|
ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples); |
|
|
|
if (ret < 0) |
|
|
|
goto end; |
|
|
|
outpicref = showwaves->outpicref; |
|
|
|
|
|
|
|
for (j = 0; j < nb_channels; j++) { |
|
|
|
uint8_t *buf = outpicref->data[0] + showwaves->buf_idx; |
|
|
|
const int linesize = outpicref->linesize[0]; |
|
|
|
if (showwaves->split_channels) |
|
|
|
buf += j*ch_height*linesize; |
|
|
|
showwaves->draw_sample(buf, ch_height, linesize, *p++, |
|
|
|
@@ -280,30 +394,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) |
|
|
|
outpicref = showwaves->outpicref; |
|
|
|
} |
|
|
|
|
|
|
|
end: |
|
|
|
av_frame_free(&insamples); |
|
|
|
return ret; |
|
|
|
} |
|
|
|
|
|
|
|
static av_cold int init(AVFilterContext *ctx) |
|
|
|
{ |
|
|
|
ShowWavesContext *showwaves = ctx->priv; |
|
|
|
|
|
|
|
switch (showwaves->mode) { |
|
|
|
case MODE_POINT: showwaves->draw_sample = draw_sample_point; break; |
|
|
|
case MODE_LINE: showwaves->draw_sample = draw_sample_line; break; |
|
|
|
case MODE_P2P: showwaves->draw_sample = draw_sample_p2p; break; |
|
|
|
case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break; |
|
|
|
default: |
|
|
|
return AVERROR_BUG; |
|
|
|
} |
|
|
|
return 0; |
|
|
|
} |
|
|
|
|
|
|
|
static const AVFilterPad showwaves_inputs[] = { |
|
|
|
{ |
|
|
|
.name = "default", |
|
|
|
.type = AVMEDIA_TYPE_AUDIO, |
|
|
|
.filter_frame = filter_frame, |
|
|
|
.filter_frame = showwaves_filter_frame, |
|
|
|
}, |
|
|
|
{ NULL } |
|
|
|
}; |
|
|
|
@@ -329,3 +429,107 @@ AVFilter ff_avf_showwaves = { |
|
|
|
.outputs = showwaves_outputs, |
|
|
|
.priv_class = &showwaves_class, |
|
|
|
}; |
|
|
|
|
|
|
|
#endif // CONFIG_SHOWWAVES_FILTER |
|
|
|
|
|
|
|
#if CONFIG_SHOWWAVESPIC_FILTER |
|
|
|
|
|
|
|
#define OFFSET(x) offsetof(ShowWavesContext, x) |
|
|
|
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM |
|
|
|
|
|
|
|
static const AVOption showwavespic_options[] = { |
|
|
|
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, |
|
|
|
{ "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS }, |
|
|
|
{ "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS }, |
|
|
|
{ NULL } |
|
|
|
}; |
|
|
|
|
|
|
|
AVFILTER_DEFINE_CLASS(showwavespic); |
|
|
|
|
|
|
|
/**
 * Input-pad configuration for showwavespic.
 *
 * Allocates the per-channel absolute-sum accumulators used when the
 * final picture is rendered in push_single_pic().
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int showwavespic_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowWavesContext *showwaves = ctx->priv;

    if (!showwaves->single_pic)
        return 0;

    showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
    return showwaves->sum ? 0 : AVERROR(ENOMEM);
}
|
|
|
|
|
|
|
/**
 * Per-frame callback of the showwavespic filter.
 *
 * In single-picture mode the audio frame is appended to the internal
 * queue (ownership of @p insamples is transferred to the list) and the
 * output frame is lazily allocated; rendering happens later at EOF.
 * On any error path, and in the (unexpected) non-single_pic case, the
 * input frame is freed here.
 */
static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    struct frame_node *f;
    int ret = 0;

    if (!showwaves->single_pic)
        goto end;

    ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
    if (ret < 0)
        goto end;

    /* append the audio frame to the queue */
    f = av_malloc(sizeof(*f));
    if (!f) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    f->frame = insamples;
    f->next  = NULL;

    if (showwaves->last_frame)
        showwaves->last_frame->next = f;
    else
        showwaves->audio_frames = f;
    showwaves->last_frame = f;

    showwaves->total_samples += insamples->nb_samples;
    return 0; /* insamples is now owned by the queue */

end:
    av_frame_free(&insamples);
    return ret;
}
|
|
|
|
|
|
|
static const AVFilterPad showwavespic_inputs[] = { |
|
|
|
{ |
|
|
|
.name = "default", |
|
|
|
.type = AVMEDIA_TYPE_AUDIO, |
|
|
|
.config_props = showwavespic_config_input, |
|
|
|
.filter_frame = showwavespic_filter_frame, |
|
|
|
}, |
|
|
|
{ NULL } |
|
|
|
}; |
|
|
|
|
|
|
|
static const AVFilterPad showwavespic_outputs[] = { |
|
|
|
{ |
|
|
|
.name = "default", |
|
|
|
.type = AVMEDIA_TYPE_VIDEO, |
|
|
|
.config_props = config_output, |
|
|
|
.request_frame = request_frame, |
|
|
|
}, |
|
|
|
{ NULL } |
|
|
|
}; |
|
|
|
|
|
|
|
AVFilter ff_avf_showwavespic = { |
|
|
|
.name = "showwavespic", |
|
|
|
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."), |
|
|
|
.init = init, |
|
|
|
.uninit = uninit, |
|
|
|
.query_formats = query_formats, |
|
|
|
.priv_size = sizeof(ShowWavesContext), |
|
|
|
.inputs = showwavespic_inputs, |
|
|
|
.outputs = showwavespic_outputs, |
|
|
|
.priv_class = &showwavespic_class, |
|
|
|
}; |
|
|
|
|
|
|
|
#endif // CONFIG_SHOWWAVESPIC_FILTER |