/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
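
/*
 * Minimal usage sketch (illustrative only; assumes the caller has already
 * built a graph containing an instance of the "buffer" or "abuffer" filter,
 * error handling omitted): frames are pushed into the source and a NULL
 * frame signals end of stream.
 *
 *     AVFilterContext *src = ...; // the buffer source created by the caller
 *     AVFrame *frame = ...;       // decoded frame to feed into the graph
 *
 *     av_buffersrc_write_frame(src, frame);  // queues a copy of the frame
 *     av_buffersrc_add_frame(src, NULL, 0);  // NULL frame marks EOF
 */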
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "avcodec.h"

typedef struct {
    const AVClass *class;
    AVFifoBuffer  *fifo;
    AVRational     time_base;  ///< time_base to set in the output link
    AVRational     frame_rate; ///< frame_rate to set in the output link
    unsigned       nb_failed_requests;
    unsigned       warning_limit;

    /* video only */
    int w, h;
    enum AVPixelFormat pix_fmt;
    AVRational pixel_aspect;
    char      *sws_param;

    /* audio only */
    int sample_rate;
    enum AVSampleFormat sample_fmt;
    char    *sample_fmt_str;
    uint64_t channel_layout;
    char    *channel_layout_str;

    int eof;
} BufferSourceContext;

#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_INFO, "Changing frame properties on the fly is not supported by all filters.\n");\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }
int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    picref = avfilter_get_buffer_ref_from_frame(buffer_src->outputs[0]->type,
                                                frame, AV_PERM_WRITE);
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

int av_buffersrc_write_frame(AVFilterContext *buffer_filter, const AVFrame *frame)
{
    return av_buffersrc_add_frame(buffer_filter, frame, 0);
}

int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
{
    BufferSourceContext *c = s->priv;
    AVFilterBufferRef *to_free = NULL;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (s->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                     buf->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }
    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
        to_free = buf = ff_copy_buffer_ref(s->outputs[0], buf);
    if (!buf)
        return -1;
    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(to_free);
        return ret;
    }
    c->nb_failed_requests = 0;
    if (c->warning_limit &&
        av_fifo_size(c->fifo) / sizeof(buf) >= c->warning_limit) {
        av_log(s, AV_LOG_WARNING,
               "%d buffers queued in %s, something may be wrong.\n",
               c->warning_limit,
               (char *)av_x_if_null(s->name, s->filter->name));
        c->warning_limit *= 10;
    }

    if ((flags & AV_BUFFERSRC_FLAG_PUSH))
        if ((ret = s->output_pads[0].request_frame(s->outputs[0])) < 0)
            return ret;

    return 0;
}

#ifdef FF_API_BUFFERSRC_BUFFER
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
}
#endif
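
/* nb_failed_requests counts request_frame() calls that found the FIFO empty;
 * it is reset whenever a new buffer is queued, so callers can poll it to
 * decide when the graph needs more input. */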
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

#define OFFSET(x) offsetof(BufferSourceContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption buffer_options[] = {
    { "time_base",    NULL, OFFSET(time_base),    AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "frame_rate",   NULL, OFFSET(frame_rate),   AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "video_size",   NULL, OFFSET(w),            AV_OPT_TYPE_IMAGE_SIZE, .flags = FLAGS },
    { "pix_fmt",      NULL, OFFSET(pix_fmt),      AV_OPT_TYPE_PIXEL_FMT,  .flags = FLAGS },
    { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "sws_param",    NULL, OFFSET(sws_param),    AV_OPT_TYPE_STRING,     .flags = FLAGS },
    { NULL },
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(buffer);

static av_cold int init_video(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128], sws_param[256] = "", *colon, *equal;
    int ret, n = 0;

    c->class = &buffer_class;

    if (!args) {
        av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
        return AVERROR(EINVAL);
    }
    colon = strchr(args, ':');
    equal = strchr(args, '=');
    if (equal && (!colon || equal < colon)) {
        av_opt_set_defaults(c);
        ret = av_set_options_string(c, args, "=", ":");
        if (ret < 0)
            goto fail;
    } else {
        if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                        &c->time_base.num, &c->time_base.den,
                        &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) {
            av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs\n");

        if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
            goto fail;
        c->sws_param = av_strdup(sws_param);
        if (!c->sws_param) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
           c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
           c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
    c->warning_limit = 100;
    return 0;

fail:
    av_opt_free(c);
    return ret;
}
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
static const AVOption abuffer_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, INT_MAX, FLAGS },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,   .flags = FLAGS },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,   .flags = FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(abuffer);

static av_cold int init_audio(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0)
        goto fail;

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
    if (!s->channel_layout) {
        av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE,
           "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
           s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);
    s->warning_limit = 100;

fail:
    av_opt_free(s);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    av_freep(&s->sws_param);
}

static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts, c->channel_layout);
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
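
/* Propagate the parameters collected at init time (size, SAR, time base,
 * frame rate) to the output link when the graph is configured. */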
static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base = c->time_base;
    link->frame_rate = c->frame_rate;
    return 0;
}
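
/* Pull path: hand the oldest queued buffer to the next filter, or return
 * EAGAIN (counting the failed request) while the FIFO is still empty. */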
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;
    int ret = 0;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);
    ff_filter_frame(link, buf);

    return ret;
}
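
/* Report how many buffers can be delivered immediately, or AVERROR_EOF once
 * the queue has drained after EOF was signalled. */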
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size / sizeof(AVFilterBufferRef*);
}

static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .poll_frame    = poll_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = NULL,
    .outputs       = avfilter_vsrc_buffer_outputs,
    .priv_class    = &buffer_class,
};

static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .poll_frame    = poll_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = NULL,
    .outputs       = avfilter_asrc_abuffer_outputs,
    .priv_class    = &abuffer_class,
};