You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

285 lines
9.8KB

  1. /*
  2. * Copyright (c) 2012 Stefano Sabatini
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * audio to video multimedia filter
  23. */
  24. #include "libavutil/channel_layout.h"
  25. #include "libavutil/opt.h"
  26. #include "libavutil/parseutils.h"
  27. #include "avfilter.h"
  28. #include "formats.h"
  29. #include "audio.h"
  30. #include "video.h"
  31. #include "internal.h"
/* Rendering style used when drawing samples into the output frame. */
enum ShowWavesMode {
    MODE_POINT, /* draw a single pixel per sample */
    MODE_LINE,  /* draw a vertical line from the center to the sample */
    MODE_P2P,   /* draw a pixel per sample plus a line to the previous one */
    MODE_NB,    /* number of modes, must be kept last */
};
/* Per-instance state for the showwaves filter. */
typedef struct {
    const AVClass *class;
    int w, h;                 /* output video size; w is also the OFFSET target
                                 for the "size" image-size option (writes w and h) */
    AVRational rate;          /* output video frame rate */
    int buf_idx;              /* x coordinate (column) currently being drawn */
    int16_t *buf_idy; /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;       /* video frame being filled; NULL when none pending */
    int req_fullfilled;       /* set once a frame was pushed during request_frame */
    int n;                    /* number of input samples accumulated per column */
    int sample_count_mod;     /* samples drawn so far in current column, in [0, n) */
    enum ShowWavesMode mode;  /* one of MODE_* above */
} ShowWavesContext;
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showwaves_options[] = {
    /* "size"/"s" target OFFSET(w): AV_OPT_TYPE_IMAGE_SIZE writes both w and h,
     * which are adjacent in ShowWavesContext */
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",  0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",   0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},  .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",   0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},   .flags=FLAGS, .unit="mode"},
    /* n == 0 means "auto": derived from sample rate, width and rate in config_output */
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showwaves);
  65. static av_cold void uninit(AVFilterContext *ctx)
  66. {
  67. ShowWavesContext *showwaves = ctx->priv;
  68. av_frame_free(&showwaves->outpicref);
  69. av_freep(&showwaves->buf_idy);
  70. }
  71. static int query_formats(AVFilterContext *ctx)
  72. {
  73. AVFilterFormats *formats = NULL;
  74. AVFilterChannelLayouts *layouts = NULL;
  75. AVFilterLink *inlink = ctx->inputs[0];
  76. AVFilterLink *outlink = ctx->outputs[0];
  77. static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
  78. static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
  79. /* set input audio formats */
  80. formats = ff_make_format_list(sample_fmts);
  81. if (!formats)
  82. return AVERROR(ENOMEM);
  83. ff_formats_ref(formats, &inlink->out_formats);
  84. layouts = ff_all_channel_layouts();
  85. if (!layouts)
  86. return AVERROR(ENOMEM);
  87. ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
  88. formats = ff_all_samplerates();
  89. if (!formats)
  90. return AVERROR(ENOMEM);
  91. ff_formats_ref(formats, &inlink->out_samplerates);
  92. /* set output video format */
  93. formats = ff_make_format_list(pix_fmts);
  94. if (!formats)
  95. return AVERROR(ENOMEM);
  96. ff_formats_ref(formats, &outlink->in_formats);
  97. return 0;
  98. }
  99. static int config_output(AVFilterLink *outlink)
  100. {
  101. AVFilterContext *ctx = outlink->src;
  102. AVFilterLink *inlink = ctx->inputs[0];
  103. ShowWavesContext *showwaves = ctx->priv;
  104. int nb_channels = inlink->channels;
  105. if (!showwaves->n)
  106. showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);
  107. showwaves->buf_idx = 0;
  108. if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
  109. av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
  110. return AVERROR(ENOMEM);
  111. }
  112. outlink->w = showwaves->w;
  113. outlink->h = showwaves->h;
  114. outlink->sample_aspect_ratio = (AVRational){1,1};
  115. outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
  116. (AVRational){showwaves->w,1});
  117. av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
  118. showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
  119. return 0;
  120. }
  121. inline static int push_frame(AVFilterLink *outlink)
  122. {
  123. AVFilterContext *ctx = outlink->src;
  124. AVFilterLink *inlink = ctx->inputs[0];
  125. ShowWavesContext *showwaves = outlink->src->priv;
  126. int nb_channels = inlink->channels;
  127. int ret, i;
  128. if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0)
  129. showwaves->req_fullfilled = 1;
  130. showwaves->outpicref = NULL;
  131. showwaves->buf_idx = 0;
  132. for (i = 0; i <= nb_channels; i++)
  133. showwaves->buf_idy[i] = 0;
  134. return ret;
  135. }
  136. static int request_frame(AVFilterLink *outlink)
  137. {
  138. ShowWavesContext *showwaves = outlink->src->priv;
  139. AVFilterLink *inlink = outlink->src->inputs[0];
  140. int ret;
  141. showwaves->req_fullfilled = 0;
  142. do {
  143. ret = ff_request_frame(inlink);
  144. } while (!showwaves->req_fullfilled && ret >= 0);
  145. if (ret == AVERROR_EOF && showwaves->outpicref)
  146. push_frame(outlink);
  147. return ret;
  148. }
  149. #define MAX_INT16 ((1<<15) -1)
  150. static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
  151. {
  152. AVFilterContext *ctx = inlink->dst;
  153. AVFilterLink *outlink = ctx->outputs[0];
  154. ShowWavesContext *showwaves = ctx->priv;
  155. const int nb_samples = insamples->nb_samples;
  156. AVFrame *outpicref = showwaves->outpicref;
  157. int linesize = outpicref ? outpicref->linesize[0] : 0;
  158. int16_t *p = (int16_t *)insamples->data[0];
  159. int nb_channels = inlink->channels;
  160. int i, j, k, h, ret = 0;
  161. const int n = showwaves->n;
  162. const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
  163. /* draw data in the buffer */
  164. for (i = 0; i < nb_samples; i++) {
  165. if (!showwaves->outpicref) {
  166. showwaves->outpicref = outpicref =
  167. ff_get_video_buffer(outlink, outlink->w, outlink->h);
  168. if (!outpicref)
  169. return AVERROR(ENOMEM);
  170. outpicref->width = outlink->w;
  171. outpicref->height = outlink->h;
  172. outpicref->pts = insamples->pts +
  173. av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
  174. (AVRational){ 1, inlink->sample_rate },
  175. outlink->time_base);
  176. linesize = outpicref->linesize[0];
  177. for (j = 0; j < outlink->h; j++)
  178. memset(outpicref->data[0] + j * linesize, 0, outlink->w);
  179. }
  180. for (j = 0; j < nb_channels; j++) {
  181. h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16);
  182. switch (showwaves->mode) {
  183. case MODE_POINT:
  184. if (h >= 0 && h < outlink->h)
  185. *(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x;
  186. break;
  187. case MODE_LINE:
  188. {
  189. int start = showwaves->h/2, end = av_clip(h, 0, outlink->h-1);
  190. if (start > end) FFSWAP(int16_t, start, end);
  191. for (k = start; k < end; k++)
  192. *(outpicref->data[0] + showwaves->buf_idx + k * linesize) += x;
  193. break;
  194. }
  195. case MODE_P2P:
  196. if (h >= 0 && h < outlink->h) {
  197. *(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x;
  198. if (showwaves->buf_idy[j] && h != showwaves->buf_idy[j]) {
  199. int start = showwaves->buf_idy[j], end = av_clip(h, 0, outlink->h-1);
  200. if (start > end)
  201. FFSWAP(int16_t, start, end);
  202. for (k = start + 1; k < end; k++)
  203. *(outpicref->data[0] + showwaves->buf_idx + k * linesize) += x;
  204. }
  205. }
  206. break;
  207. }
  208. /* store current y coordinate for this channel */
  209. showwaves->buf_idy[j] = h;
  210. }
  211. showwaves->sample_count_mod++;
  212. if (showwaves->sample_count_mod == n) {
  213. showwaves->sample_count_mod = 0;
  214. showwaves->buf_idx++;
  215. }
  216. if (showwaves->buf_idx == showwaves->w)
  217. if ((ret = push_frame(outlink)) < 0)
  218. break;
  219. outpicref = showwaves->outpicref;
  220. }
  221. av_frame_free(&insamples);
  222. return ret;
  223. }
/* Single audio input; samples are consumed by filter_frame. */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single video output; frames are produced on demand via request_frame. */
static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter definition registered with libavfilter. */
AVFilter ff_avf_showwaves = {
    .name           = "showwaves",
    .description    = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .uninit         = uninit,
    .query_formats  = query_formats,
    .priv_size      = sizeof(ShowWavesContext),
    .inputs         = showwaves_inputs,
    .outputs        = showwaves_outputs,
    .priv_class     = &showwaves_class,
};