/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */

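/*
 * Illustrative usage sketch (not part of this file): "graph", "dec_ctx" and
 * "frame" are hypothetical caller-side names. A typical caller instantiates
 * the "buffer" source, pushes decoded frames, and finally passes NULL to
 * signal EOF (see av_buffersrc_add_frame() below).
 *
 * @code
 * AVFilterContext *src = NULL;
 * char args[256];
 * int ret;
 *
 * snprintf(args, sizeof(args),
 *          "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
 *          dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
 *          dec_ctx->time_base.num, dec_ctx->time_base.den,
 *          dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
 * ret = avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"),
 *                                    "in", args, NULL, graph);
 * if (ret < 0)
 *     return ret;
 *
 * // feed one decoded frame per call; the source copies it unless
 * // AV_BUFFERSRC_FLAG_NO_COPY is passed through av_buffersrc_add_ref()
 * ret = av_buffersrc_write_frame(src, frame);
 *
 * // a NULL frame marks end of stream
 * av_buffersrc_write_frame(src, NULL);
 * @endcode
 */
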
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "avcodec.h"

typedef struct {
    const AVClass *class;
    AVFifoBuffer  *fifo;
    AVRational     time_base;     ///< time_base to set in the output link
    AVRational     frame_rate;    ///< frame_rate to set in the output link
    unsigned       nb_failed_requests;
    unsigned       warning_limit;

    /* video only */
    int w, h;
    enum AVPixelFormat pix_fmt;
    AVRational pixel_aspect;
    char      *sws_param;

    /* audio only */
    int sample_rate;
    enum AVSampleFormat sample_fmt;
    char   *sample_fmt_str;
    int channels;
    uint64_t channel_layout;
    char    *channel_layout_str;

    int eof;
} BufferSourceContext;

#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_INFO, "Changing frame properties on the fly is not supported by all filters.\n");\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    picref = avfilter_get_buffer_ref_from_frame(buffer_src->outputs[0]->type,
                                                frame, AV_PERM_WRITE);
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

int av_buffersrc_write_frame(AVFilterContext *buffer_filter, const AVFrame *frame)
{
    return av_buffersrc_add_frame(buffer_filter, frame, 0);
}

int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
{
    BufferSourceContext *c = s->priv;
    AVFilterBufferRef *to_free = NULL;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (s->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (!buf->audio->channel_layout)
                buf->audio->channel_layout = c->channel_layout;
            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                     buf->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }

    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
        to_free = buf = ff_copy_buffer_ref(s->outputs[0], buf);
    if (!buf)
        return -1;

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(to_free);
        return ret;
    }
    c->nb_failed_requests = 0;
    if (c->warning_limit &&
        av_fifo_size(c->fifo) / sizeof(buf) >= c->warning_limit) {
        av_log(s, AV_LOG_WARNING,
               "%d buffers queued in %s, something may be wrong.\n",
               c->warning_limit,
               (char *)av_x_if_null(s->name, s->filter->name));
        c->warning_limit *= 10;
    }

    if ((flags & AV_BUFFERSRC_FLAG_PUSH))
        if ((ret = s->output_pads[0].request_frame(s->outputs[0])) < 0)
            return ret;

    return 0;
}

#if FF_API_BUFFERSRC_BUFFER
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
}
#endif

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

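/*
 * Illustrative sketch (hypothetical caller loop, not part of this file):
 * request_frame() below increments the counter whenever the FIFO is empty,
 * so a caller can poll it to know the graph is starved and more input should
 * be pushed. "src", "finished" and decode_one_frame() are assumed
 * application-side names.
 *
 * @code
 * int finished = 0;
 * while (!finished) {
 *     if (av_buffersrc_get_nb_failed_requests(src) > 0) {
 *         AVFrame *frame = decode_one_frame();      // hypothetical helper
 *         finished = !frame;
 *         av_buffersrc_write_frame(src, frame);     // NULL signals EOF
 *     }
 *     // ... pull filtered frames from the sink side of the graph here ...
 * }
 * @endcode
 */
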
#define OFFSET(x) offsetof(BufferSourceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption buffer_options[] = {
    { "time_base",    NULL, OFFSET(time_base),    AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "frame_rate",   NULL, OFFSET(frame_rate),   AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "video_size",   NULL, OFFSET(w),            AV_OPT_TYPE_IMAGE_SIZE,               .flags = FLAGS },
    { "pix_fmt",      NULL, OFFSET(pix_fmt),      AV_OPT_TYPE_PIXEL_FMT,                .flags = FLAGS },
    { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "sws_param",    NULL, OFFSET(sws_param),    AV_OPT_TYPE_STRING,                   .flags = FLAGS },
    { NULL },
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(buffer);

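/*
 * Argument syntaxes accepted by init_video() below; the concrete values are
 * only illustrative:
 *
 *   key=value (preferred):
 *       "video_size=320x240:pix_fmt=yuv420p:time_base=1/25:pixel_aspect=1/1"
 *   flat (deprecated, at least 7 colon-separated fields, optional trailing
 *   sws_param):
 *       "320:240:yuv420p:1:25:1:1"
 */
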
static av_cold int init_video(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128], sws_param[256] = "", *colon, *equal;
    int ret, n = 0;

    c->class = &buffer_class;

    if (!args) {
        av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
        return AVERROR(EINVAL);
    }
    colon = strchr(args, ':');
    equal = strchr(args, '=');
    if (equal && (!colon || equal < colon)) {
        av_opt_set_defaults(c);
        ret = av_set_options_string(c, args, "=", ":");
        if (ret < 0)
            goto fail;
    } else {
        if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                        &c->time_base.num, &c->time_base.den,
                        &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) {
            av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs\n");

        if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
            goto fail;
        c->sws_param = av_strdup(sws_param);
        if (!c->sws_param) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
           c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
           c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
    c->warning_limit = 100;
    return 0;

fail:
    av_opt_free(c);
    return ret;
}

#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM

static const AVOption abuffer_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,                 .flags = FLAGS },
    { "channels",       NULL, OFFSET(channels),           AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,                 .flags = FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(abuffer);

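/*
 * Illustrative creation of the "abuffer" source (values are only an example;
 * "graph" is a hypothetical, already allocated AVFilterGraph):
 *
 * @code
 * AVFilterContext *asrc = NULL;
 * int ret = avfilter_graph_create_filter(&asrc, avfilter_get_by_name("abuffer"), "ain",
 *                                        "time_base=1/44100:sample_rate=44100:"
 *                                        "sample_fmt=s16p:channel_layout=stereo",
 *                                        NULL, graph);
 * @endcode
 *
 * init_audio() below resolves sample_fmt and channel_layout by name and
 * derives the channel count when only a layout is given.
 */
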
static av_cold int init_audio(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
        goto fail;

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (s->channel_layout_str) {
        int n;

        s->channel_layout = av_get_channel_layout(s->channel_layout_str);
        if (!s->channel_layout) {
            av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n",
                   s->channel_layout_str);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        n = av_get_channel_layout_nb_channels(s->channel_layout);
        if (s->channels) {
            if (n != s->channels) {
                av_log(ctx, AV_LOG_ERROR,
                       "Mismatching channel count %d and layout '%s' "
                       "(%d channels)\n",
                       s->channels, s->channel_layout_str, n);
                ret = AVERROR(EINVAL);
                goto fail;
            }
        }
        s->channels = n;
    } else if (!s->channels) {
        av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor "
                                  "channel layout specified\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE,
           "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
           s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);
    s->warning_limit = 100;

fail:
    av_opt_free(s);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    av_freep(&s->sws_param);
}

static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts,
                              c->channel_layout ? c->channel_layout :
                              FF_COUNT2LAYOUT(c->channels));
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}

static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (!c->channel_layout)
            c->channel_layout = link->channel_layout;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base = c->time_base;
    link->frame_rate = c->frame_rate;
    return 0;
}

static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    return ff_filter_frame(link, buf);
}

static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size / sizeof(AVFilterBufferRef*);
}

static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .poll_frame    = poll_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = NULL,
    .outputs       = avfilter_vsrc_buffer_outputs,
    .priv_class    = &buffer_class,
};

static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .poll_frame    = poll_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = NULL,
    .outputs       = avfilter_asrc_abuffer_outputs,
    .priv_class    = &abuffer_class,
};