/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
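
/*
 * Illustrative usage from an application (a sketch, not part of this file):
 * decoded frames are fed into the filtergraph through this source and pulled
 * back out of a sink filter.  The av_buffersrc_* calls are the public API
 * implemented below; buffersrc_ctx and decode_next_frame() are hypothetical.
 *
 *     AVFrame *frame = decode_next_frame();
 *     int ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame,
 *                                            AV_BUFFERSRC_FLAG_KEEP_REF);
 *     if (ret < 0)
 *         return ret;
 *     // ... pull filtered frames from the sink ...
 *     av_buffersrc_add_frame_flags(buffersrc_ctx, NULL, 0);  // signal EOF
 */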

#include <float.h>

#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/timestamp.h"

#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
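
/* Per-instance state shared by the video "buffer" and audio "abuffer" sources. */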
typedef struct BufferSourceContext {
    const AVClass *class;
    AVFifoBuffer  *fifo;
    AVRational     time_base;     ///< time_base to set in the output link
    AVRational     frame_rate;    ///< frame_rate to set in the output link
    unsigned       nb_failed_requests;

    /* video only */
    int w, h;
    enum AVPixelFormat pix_fmt;
    AVRational pixel_aspect;
    char      *sws_param;

    AVBufferRef *hw_frames_ctx;

    /* audio only */
    int sample_rate;
    enum AVSampleFormat sample_fmt;
    int channels;
    uint64_t channel_layout;
    char    *channel_layout_str;

    int got_format_from_params;
    int eof;
} BufferSourceContext;
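
/* Check an incoming frame against the configured parameters: a mismatch is
 * only a warning for video, but a hard error for audio. */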
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format, pts)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_INFO, "filter context - w: %d h: %d fmt: %d, incoming frame - w: %d h: %d fmt: %d pts_time: %s\n",\
               c->w, c->h, c->pix_fmt, width, height, format, av_ts2timestr(pts, &s->outputs[0]->time_base));\
        av_log(s, AV_LOG_WARNING, "Changing video frame properties on the fly is not supported by all filters.\n");\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, ch_count, format, pts)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout || c->channels != ch_count) {\
        av_log(s, AV_LOG_INFO, "filter context - fmt: %s r: %d layout: %"PRIX64" ch: %d, incoming frame - fmt: %s r: %d layout: %"PRIX64" ch: %d pts_time: %s\n",\
               av_get_sample_fmt_name(c->sample_fmt), c->sample_rate, c->channel_layout, c->channels,\
               av_get_sample_fmt_name(format), srate, ch_layout, ch_count, av_ts2timestr(pts, &s->outputs[0]->time_base));\
        av_log(s, AV_LOG_ERROR, "Changing audio frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

AVBufferSrcParameters *av_buffersrc_parameters_alloc(void)
{
    AVBufferSrcParameters *par = av_mallocz(sizeof(*par));
    if (!par)
        return NULL;

    par->format = -1;

    return par;
}

int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
{
    BufferSourceContext *s = ctx->priv;

    if (param->time_base.num > 0 && param->time_base.den > 0)
        s->time_base = param->time_base;

    switch (ctx->filter->outputs[0].type) {
    case AVMEDIA_TYPE_VIDEO:
        if (param->format != AV_PIX_FMT_NONE) {
            s->got_format_from_params = 1;
            s->pix_fmt = param->format;
        }
        if (param->width > 0)
            s->w = param->width;
        if (param->height > 0)
            s->h = param->height;
        if (param->sample_aspect_ratio.num > 0 && param->sample_aspect_ratio.den > 0)
            s->pixel_aspect = param->sample_aspect_ratio;
        if (param->frame_rate.num > 0 && param->frame_rate.den > 0)
            s->frame_rate = param->frame_rate;
        if (param->hw_frames_ctx) {
            av_buffer_unref(&s->hw_frames_ctx);
            s->hw_frames_ctx = av_buffer_ref(param->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AVERROR(ENOMEM);
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (param->format != AV_SAMPLE_FMT_NONE) {
            s->got_format_from_params = 1;
            s->sample_fmt = param->format;
        }
        if (param->sample_rate > 0)
            s->sample_rate = param->sample_rate;
        if (param->channel_layout)
            s->channel_layout = param->channel_layout;
        break;
    default:
        return AVERROR_BUG;
    }

    return 0;
}

int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
{
    return av_buffersrc_add_frame_flags(ctx, (AVFrame *)frame,
                                        AV_BUFFERSRC_FLAG_KEEP_REF);
}

int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
{
    return av_buffersrc_add_frame_flags(ctx, frame, 0);
}

static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
                                           AVFrame *frame, int flags);

int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    AVFrame *copy = NULL;
    int ret = 0;

    if (frame && frame->channel_layout &&
        av_get_channel_layout_nb_channels(frame->channel_layout) != frame->channels) {
        av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
        return AVERROR(EINVAL);
    }

    if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame)
        return av_buffersrc_add_frame_internal(ctx, frame, flags);

    if (!(copy = av_frame_alloc()))
        return AVERROR(ENOMEM);
    ret = av_frame_ref(copy, frame);
    if (ret >= 0)
        ret = av_buffersrc_add_frame_internal(ctx, copy, flags);

    av_frame_free(&copy);

    return ret;
}
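
/* Run the filtergraph until it has nothing left to do, so that a frame just
 * queued in the source propagates towards the sinks. */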
static int push_frame(AVFilterGraph *graph)
{
    int ret;

    while (1) {
        ret = ff_filter_graph_run_once(graph);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            return ret;
    }

    return 0;
}
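
/* Common path for the public frame-adding entry points: validate the frame
 * against the configured parameters, queue it in the FIFO and ask the output
 * link to pick it up.  A NULL frame marks EOF. */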
static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
                                           AVFrame *frame, int flags)
{
    BufferSourceContext *s = ctx->priv;
    AVFrame *copy;
    int refcounted, ret;

    s->nb_failed_requests = 0;

    if (!frame)
        return av_buffersrc_close(ctx, AV_NOPTS_VALUE, flags);
    if (s->eof)
        return AVERROR(EINVAL);

    refcounted = !!frame->buf[0];

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (ctx->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height,
                                     frame->format, frame->pts);
            break;
        case AVMEDIA_TYPE_AUDIO:
            /* For layouts unknown on input but known on link after negotiation. */
            if (!frame->channel_layout)
                frame->channel_layout = s->channel_layout;
            CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
                                     frame->channels, frame->format, frame->pts);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }

    if (!av_fifo_space(s->fifo) &&
        (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) +
                                         sizeof(copy))) < 0)
        return ret;

    if (!(copy = av_frame_alloc()))
        return AVERROR(ENOMEM);

    if (refcounted) {
        av_frame_move_ref(copy, frame);
    } else {
        ret = av_frame_ref(copy, frame);
        if (ret < 0) {
            av_frame_free(&copy);
            return ret;
        }
    }

    if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) {
        if (refcounted)
            av_frame_move_ref(frame, copy);
        av_frame_free(&copy);
        return ret;
    }

    if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0)
        return ret;

    if ((flags & AV_BUFFERSRC_FLAG_PUSH)) {
        ret = push_frame(ctx->graph);
        if (ret < 0)
            return ret;
    }

    return 0;
}

int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
{
    BufferSourceContext *s = ctx->priv;

    s->eof = 1;
    ff_avfilter_link_set_in_status(ctx->outputs[0], AVERROR_EOF, pts);

    return (flags & AV_BUFFERSRC_FLAG_PUSH) ? push_frame(ctx->graph) : 0;
}
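
/* Init callback for the video "buffer" source: validate the mandatory
 * parameters and allocate the frame FIFO. */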
static av_cold int init_video(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;

    if (!(c->pix_fmt != AV_PIX_FMT_NONE || c->got_format_from_params) || !c->w || !c->h ||
        av_q2d(c->time_base) <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
        return AVERROR(EINVAL);
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
           c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
           c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));

    return 0;
}

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

#define OFFSET(x) offsetof(BufferSourceContext, x)
#define A AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption buffer_options[] = {
    { "width",        NULL,                  OFFSET(w),            AV_OPT_TYPE_INT,        { .i64 = 0 }, 0, INT_MAX, V },
    { "video_size",   NULL,                  OFFSET(w),            AV_OPT_TYPE_IMAGE_SIZE,                .flags = V },
    { "height",       NULL,                  OFFSET(h),            AV_OPT_TYPE_INT,        { .i64 = 0 }, 0, INT_MAX, V },
    { "pix_fmt",      NULL,                  OFFSET(pix_fmt),      AV_OPT_TYPE_PIXEL_FMT,  { .i64 = AV_PIX_FMT_NONE }, .min = AV_PIX_FMT_NONE, .max = INT_MAX, .flags = V },
    { "sar",          "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, DBL_MAX, V },
    { "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, DBL_MAX, V },
    { "time_base",    NULL,                  OFFSET(time_base),    AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, DBL_MAX, V },
    { "frame_rate",   NULL,                  OFFSET(frame_rate),   AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, DBL_MAX, V },
    { "sws_param",    NULL,                  OFFSET(sws_param),    AV_OPT_TYPE_STRING,                    .flags = V },
    { NULL },
};

AVFILTER_DEFINE_CLASS(buffer);

static const AVOption abuffer_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL,   { .dbl = 0 }, 0, INT_MAX, A },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,        { .i64 = 0 }, 0, INT_MAX, A },
    { "sample_fmt",     NULL, OFFSET(sample_fmt),         AV_OPT_TYPE_SAMPLE_FMT, { .i64 = AV_SAMPLE_FMT_NONE }, .min = AV_SAMPLE_FMT_NONE, .max = INT_MAX, .flags = A },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,                   .flags = A },
    { "channels",       NULL, OFFSET(channels),           AV_OPT_TYPE_INT,        { .i64 = 0 }, 0, INT_MAX, A },
    { NULL },
};

AVFILTER_DEFINE_CLASS(abuffer);
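
/* Init callback for the audio "abuffer" source: resolve the channel layout and
 * channel count, validate the sample format and allocate the frame FIFO. */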
static av_cold int init_audio(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    if (!(s->sample_fmt != AV_SAMPLE_FMT_NONE || s->got_format_from_params)) {
        av_log(ctx, AV_LOG_ERROR, "Sample format was not set or was invalid\n");
        return AVERROR(EINVAL);
    }

    if (s->channel_layout_str || s->channel_layout) {
        int n;

        if (!s->channel_layout) {
            s->channel_layout = av_get_channel_layout(s->channel_layout_str);
            if (!s->channel_layout) {
                av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
                       s->channel_layout_str);
                return AVERROR(EINVAL);
            }
        }
        n = av_get_channel_layout_nb_channels(s->channel_layout);
        if (s->channels) {
            if (n != s->channels) {
                av_log(ctx, AV_LOG_ERROR,
                       "Mismatching channel count %d and layout '%s' "
                       "(%d channels)\n",
                       s->channels, s->channel_layout_str, n);
                return AVERROR(EINVAL);
            }
        }
        s->channels = n;
    } else if (!s->channels) {
        av_log(ctx, AV_LOG_ERROR, "Neither number of channels nor "
                                  "channel layout specified\n");
        return AVERROR(EINVAL);
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*))))
        return AVERROR(ENOMEM);

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE,
           "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
           s->time_base.num, s->time_base.den, av_get_sample_fmt_name(s->sample_fmt),
           s->sample_rate, s->channel_layout_str);

    return ret;
}
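
/* Free any frames still queued in the FIFO, the FIFO itself and the hardware
 * frames context reference. */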
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFrame *frame;
        av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
        av_frame_free(&frame);
    }
    av_buffer_unref(&s->hw_frames_ctx);
    av_fifo_freep(&s->fifo);
}
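
/* Advertise only the format (and, for audio, the sample rate and channel
 * layout) the source was configured with, so format negotiation cannot pick
 * anything else. */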
static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;
    int ret;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        if ((ret = ff_add_format(&formats, c->pix_fmt)) < 0 ||
            (ret = ff_set_common_formats(ctx, formats)) < 0)
            return ret;
        break;
    case AVMEDIA_TYPE_AUDIO:
        if ((ret = ff_add_format(&formats, c->sample_fmt)) < 0 ||
            (ret = ff_set_common_formats(ctx, formats)) < 0 ||
            (ret = ff_add_format(&samplerates, c->sample_rate)) < 0 ||
            (ret = ff_set_common_samplerates(ctx, samplerates)) < 0)
            return ret;

        if ((ret = ff_add_channel_layout(&channel_layouts,
                                         c->channel_layout ? c->channel_layout :
                                         FF_COUNT2LAYOUT(c->channels))) < 0)
            return ret;
        if ((ret = ff_set_common_channel_layouts(ctx, channel_layouts)) < 0)
            return ret;
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
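
/* Copy the configured properties onto the output link once negotiation is
 * done: dimensions, aspect ratio and hw frames context for video, the
 * negotiated channel layout for audio, plus time base and frame rate. */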
static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;

        if (c->hw_frames_ctx) {
            link->hw_frames_ctx = av_buffer_ref(c->hw_frames_ctx);
            if (!link->hw_frames_ctx)
                return AVERROR(ENOMEM);
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (!c->channel_layout)
            c->channel_layout = link->channel_layout;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base = c->time_base;
    link->frame_rate = c->frame_rate;
    return 0;
}
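
/* Hand the oldest queued frame to the next filter; report EOF once the FIFO is
 * empty and the source has been closed, EAGAIN otherwise. */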
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFrame *frame;
    int ret;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);

    ret = ff_filter_frame(link, frame);

    return ret;
}
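
/* Report how many frames are currently queued, or EOF when the queue is empty
 * and the source has been closed. */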
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;

    return size / sizeof(AVFrame*);
}

static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .poll_frame    = poll_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter ff_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = NULL,
    .outputs       = avfilter_vsrc_buffer_outputs,
    .priv_class    = &buffer_class,
};

static const AVFilterPad avfilter_asrc_abuffer_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .poll_frame    = poll_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter ff_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = NULL,
    .outputs       = avfilter_asrc_abuffer_outputs,
    .priv_class    = &abuffer_class,
};