/*
 * Copyright (c) 2011 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * buffer sink
 */

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/fifo.h"

#include "avfilter.h"
#include "buffersink.h"
#include "audio.h"
#include "internal.h"
AVBufferSinkParams *av_buffersink_params_alloc(void)
{
    static const int pixel_fmts[] = { AV_PIX_FMT_NONE };
    AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));

    if (!params)
        return NULL;

    params->pixel_fmts = pixel_fmts;
    return params;
}
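
/* Illustrative usage sketch (added comment, not code from this file): how a
 * caller might pass the params when instantiating the video sink.  The
 * pix_fmts array, the "graph" variable and the surrounding error handling are
 * assumptions made up for the example.
 *
 *     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P,
 *                                                    AV_PIX_FMT_NONE };
 *     AVFilterContext *sink_ctx = NULL;
 *     AVBufferSinkParams *params = av_buffersink_params_alloc();
 *     int ret;
 *     if (!params)
 *         return AVERROR(ENOMEM);
 *     params->pixel_fmts = pix_fmts;
 *     ret = avfilter_graph_create_filter(&sink_ctx,
 *                                        avfilter_get_by_name("buffersink"),
 *                                        "out", NULL, params, graph);
 *     av_free(params);
 */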

AVABufferSinkParams *av_abuffersink_params_alloc(void)
{
    static const int sample_fmts[] = { AV_SAMPLE_FMT_NONE };
    static const int64_t channel_layouts[] = { -1 };
    AVABufferSinkParams *params = av_malloc(sizeof(AVABufferSinkParams));

    if (!params)
        return NULL;

    params->sample_fmts = sample_fmts;
    params->channel_layouts = channel_layouts;
    return params;
}

typedef struct {
    AVFifoBuffer *fifo;                 ///< FIFO buffer of video frame references
    unsigned warning_limit;

    /* only used for video */
    enum AVPixelFormat *pixel_fmts;     ///< list of accepted pixel formats, must be terminated with -1

    /* only used for audio */
    enum AVSampleFormat *sample_fmts;   ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
    int64_t *channel_layouts;           ///< list of accepted channel layouts, terminated by -1
} BufferSinkContext;

#define FIFO_INIT_SIZE 8

static av_cold int common_init(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;

    buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
    if (!buf->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
        return AVERROR(ENOMEM);
    }
    buf->warning_limit = 100;
    return 0;
}

static av_cold void common_uninit(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterBufferRef *picref;

    if (buf->fifo) {
        while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) {
            av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL);
            avfilter_unref_buffer(picref);
        }
        av_fifo_free(buf->fifo);
        buf->fifo = NULL;
    }
}

static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref)
{
    BufferSinkContext *buf = ctx->priv;

    if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
        /* realloc fifo size */
        if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Cannot buffer more frames. Consume some available frames "
                   "before adding new ones.\n");
            return AVERROR(ENOMEM);
        }
    }

    /* cache frame */
    av_fifo_generic_write(buf->fifo, &ref, sizeof(AVFilterBufferRef *), NULL);
    return 0;
}

static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BufferSinkContext *buf = inlink->dst->priv;
    int ret;

    av_assert1(inlink->cur_buf);
    if ((ret = add_buffer_ref(ctx, inlink->cur_buf)) < 0)
        return ret;
    inlink->cur_buf = NULL;
    if (buf->warning_limit &&
        av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) {
        av_log(ctx, AV_LOG_WARNING,
               "%d buffers queued in %s, something may be wrong.\n",
               buf->warning_limit,
               (char *)av_x_if_null(ctx->name, ctx->filter->name));
        buf->warning_limit *= 10;
    }
    return 0;
}

void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
{
    AVFilterLink *inlink = ctx->inputs[0];

    inlink->min_samples = inlink->max_samples =
    inlink->partial_buf_size = frame_size;
}

int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
                                 AVFilterBufferRef **bufref, int flags)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret;
    *bufref = NULL;

    av_assert0(    !strcmp(ctx->filter->name, "buffersink")
                || !strcmp(ctx->filter->name, "abuffersink")
                || !strcmp(ctx->filter->name, "ffbuffersink")
                || !strcmp(ctx->filter->name, "ffabuffersink"));

    /* no picref available, fetch it from the filterchain */
    if (!av_fifo_size(buf->fifo)) {
        if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
            return AVERROR(EAGAIN);
        if ((ret = ff_request_frame(inlink)) < 0)
            return ret;
    }

    if (!av_fifo_size(buf->fifo))
        return AVERROR(EINVAL);

    if (flags & AV_BUFFERSINK_FLAG_PEEK)
        *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
    else
        av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);

    return 0;
}
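
/* Illustrative consumer loop (added comment, a sketch rather than code from
 * this file): "sink_ctx" is assumed to be a configured (ff)buffersink or
 * (ff)abuffersink instance, and the per-frame processing is left out.
 *
 *     AVFilterBufferRef *ref = NULL;
 *     int ret;
 *     while ((ret = av_buffersink_get_buffer_ref(sink_ctx, &ref, 0)) >= 0) {
 *         // ... use ref->data / ref->extended_data here ...
 *         avfilter_unref_buffer(ref);
 *     }
 *     // ret ends up as AVERROR_EOF once the chain is drained; with
 *     // AV_BUFFERSINK_FLAG_NO_REQUEST the call returns AVERROR(EAGAIN)
 *     // instead of pulling new frames from the filtergraph.
 */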

AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
{
    av_assert0(   !strcmp(ctx->filter->name, "buffersink")
               || !strcmp(ctx->filter->name, "ffbuffersink"));

    return ctx->inputs[0]->frame_rate;
}
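
/* Descriptive note added in this rendering (not from the original file): the
 * poll helper below reports how many frames can be read immediately, i.e. the
 * number already queued in the sink's FIFO plus whatever ff_poll_frame()
 * reports as available from the input link.
 */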
int av_buffersink_poll_frame(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    av_assert0(    !strcmp(ctx->filter->name, "buffersink")
                || !strcmp(ctx->filter->name, "abuffersink")
                || !strcmp(ctx->filter->name, "ffbuffersink")
                || !strcmp(ctx->filter->name, "ffabuffersink"));

    return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink);
}

static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSinkContext *buf = ctx->priv;
    AVBufferSinkParams *params = opaque;

    if (params && params->pixel_fmts) {
        const int *pixel_fmts = params->pixel_fmts;

        buf->pixel_fmts = ff_copy_int_list(pixel_fmts);
        if (!buf->pixel_fmts)
            return AVERROR(ENOMEM);
    }

    return common_init(ctx);
}

static av_cold void vsink_uninit(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    av_freep(&buf->pixel_fmts);
    common_uninit(ctx);
}

static int vsink_query_formats(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;

    if (buf->pixel_fmts)
        ff_set_common_formats(ctx, ff_make_format_list(buf->pixel_fmts));
    else
        ff_default_query_formats(ctx);

    return 0;
}

static const AVFilterPad ffbuffersink_inputs[] = {
    {
        .name      = "default",
        .type      = AVMEDIA_TYPE_VIDEO,
        .end_frame = end_frame,
        .min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
    },
    { NULL },
};

AVFilter avfilter_vsink_ffbuffersink = {
    .name        = "ffbuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size   = sizeof(BufferSinkContext),
    .init_opaque = vsink_init,
    .uninit      = vsink_uninit,

    .query_formats = vsink_query_formats,
    .inputs        = ffbuffersink_inputs,
    .outputs       = NULL,
};

static const AVFilterPad buffersink_inputs[] = {
    {
        .name      = "default",
        .type      = AVMEDIA_TYPE_VIDEO,
        .end_frame = end_frame,
        .min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
    },
    { NULL },
};

AVFilter avfilter_vsink_buffersink = {
    .name        = "buffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size   = sizeof(BufferSinkContext),
    .init_opaque = vsink_init,
    .uninit      = vsink_uninit,

    .query_formats = vsink_query_formats,
    .inputs        = buffersink_inputs,
    .outputs       = NULL,
};

static int filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    end_frame(link);
    return 0;
}

static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSinkContext *buf = ctx->priv;
    AVABufferSinkParams *params = opaque;

    if (params && params->sample_fmts) {
        buf->sample_fmts = ff_copy_int_list(params->sample_fmts);
        if (!buf->sample_fmts)
            goto fail_enomem;
    }
    if (params && params->channel_layouts) {
        buf->channel_layouts = ff_copy_int64_list(params->channel_layouts);
        if (!buf->channel_layouts)
            goto fail_enomem;
    }
    if (!common_init(ctx))
        return 0;

fail_enomem:
    av_freep(&buf->sample_fmts);
    av_freep(&buf->channel_layouts);
    return AVERROR(ENOMEM);
}

static av_cold void asink_uninit(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;

    av_freep(&buf->sample_fmts);
    av_freep(&buf->channel_layouts);
    common_uninit(ctx);
}

static int asink_query_formats(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;

    if (buf->sample_fmts) {
        if (!(formats = ff_make_format_list(buf->sample_fmts)))
            return AVERROR(ENOMEM);
        ff_set_common_formats(ctx, formats);
    }

    if (buf->channel_layouts) {
        if (!(layouts = avfilter_make_format64_list(buf->channel_layouts)))
            return AVERROR(ENOMEM);
        ff_set_common_channel_layouts(ctx, layouts);
    }

    return 0;
}

static const AVFilterPad ffabuffersink_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .min_perms    = AV_PERM_READ | AV_PERM_PRESERVE,
    },
    { NULL },
};

AVFilter avfilter_asink_ffabuffersink = {
    .name        = "ffabuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .init_opaque = asink_init,
    .uninit      = asink_uninit,
    .priv_size   = sizeof(BufferSinkContext),
    .query_formats = asink_query_formats,
    .inputs        = ffabuffersink_inputs,
    .outputs       = NULL,
};

static const AVFilterPad abuffersink_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .min_perms    = AV_PERM_READ | AV_PERM_PRESERVE,
    },
    { NULL },
};

AVFilter avfilter_asink_abuffersink = {
    .name        = "abuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .init_opaque = asink_init,
    .uninit      = asink_uninit,
    .priv_size   = sizeof(BufferSinkContext),
    .query_formats = asink_query_formats,
    .inputs        = abuffersink_inputs,
    .outputs       = NULL,
};
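
/* Illustrative audio-side counterpart (added comment; a sketch under the same
 * assumptions as the video example above: "graph", the format lists and the
 * error handling are made up for the example).  Constraining the sink this
 * way lets the caller rely on the sample format and channel layout of the
 * frames it reads.
 *
 *     static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16,
 *                                                        AV_SAMPLE_FMT_NONE };
 *     static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, -1 };
 *     AVFilterContext *asink_ctx = NULL;
 *     AVABufferSinkParams *aparams = av_abuffersink_params_alloc();
 *     int ret;
 *     if (!aparams)
 *         return AVERROR(ENOMEM);
 *     aparams->sample_fmts     = sample_fmts;
 *     aparams->channel_layouts = channel_layouts;
 *     ret = avfilter_graph_create_filter(&asink_ctx,
 *                                        avfilter_get_by_name("abuffersink"),
 *                                        "aout", NULL, aparams, graph);
 *     av_free(aparams);
 */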

/* Libav compatibility API */

extern AVFilter avfilter_vsink_buffer;
extern AVFilter avfilter_asink_abuffer;
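
/* Descriptive note added in this rendering (not from the original file): the
 * pointer comparisons in the two functions below detect whether the context
 * was created from the Libav-style buffer/abuffer sinks declared above; if so
 * they dispatch to the ff_buffersink_*_compat() helpers, otherwise they use
 * the FIFO-based implementation of this file.
 */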

int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
    AVFilterBufferRef *tbuf;
    int ret;

    if (ctx->filter->          inputs[0].start_frame ==
        avfilter_vsink_buffer. inputs[0].start_frame ||
        ctx->filter->          inputs[0].filter_frame ==
        avfilter_asink_abuffer.inputs[0].filter_frame)
        return ff_buffersink_read_compat(ctx, buf);
    av_assert0(ctx->filter->                inputs[0].end_frame ==
               avfilter_vsink_ffbuffersink. inputs[0].end_frame ||
               ctx->filter->                inputs[0].filter_frame ==
               avfilter_asink_ffabuffersink.inputs[0].filter_frame);

    ret = av_buffersink_get_buffer_ref(ctx, &tbuf,
                                       buf ? 0 : AV_BUFFERSINK_FLAG_PEEK);
    if (!buf)
        return ret >= 0;
    if (ret < 0)
        return ret;
    *buf = tbuf;
    return 0;
}

int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
                               int nb_samples)
{
    BufferSinkContext *sink = ctx->priv;
    int ret = 0, have_samples = 0, need_samples;
    AVFilterBufferRef *tbuf, *in_buf;
    AVFilterLink *link = ctx->inputs[0];
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

    if (ctx->filter->          inputs[0].filter_frame ==
        avfilter_asink_abuffer.inputs[0].filter_frame)
        return ff_buffersink_read_samples_compat(ctx, buf, nb_samples);
    av_assert0(ctx->filter->                inputs[0].filter_frame ==
               avfilter_asink_ffabuffersink.inputs[0].filter_frame);

    tbuf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples);
    if (!tbuf)
        return AVERROR(ENOMEM);

    while (have_samples < nb_samples) {
        ret = av_buffersink_get_buffer_ref(ctx, &in_buf,
                                           AV_BUFFERSINK_FLAG_PEEK);
        if (ret < 0) {
            if (ret == AVERROR_EOF && have_samples) {
                nb_samples = have_samples;
                ret = 0;
            }
            break;
        }

        need_samples = FFMIN(in_buf->audio->nb_samples,
                             nb_samples - have_samples);
        av_samples_copy(tbuf->extended_data, in_buf->extended_data,
                        have_samples, 0, need_samples,
                        nb_channels, in_buf->format);
        have_samples += need_samples;
        if (need_samples < in_buf->audio->nb_samples) {
            in_buf->audio->nb_samples -= need_samples;
            av_samples_copy(in_buf->extended_data, in_buf->extended_data,
                            0, need_samples, in_buf->audio->nb_samples,
                            nb_channels, in_buf->format);
        } else {
            av_buffersink_get_buffer_ref(ctx, &in_buf, 0);
            avfilter_unref_buffer(in_buf);
        }
    }

    tbuf->audio->nb_samples = have_samples;

    if (ret < 0) {
        av_assert0(!av_fifo_size(sink->fifo));
        if (have_samples)
            add_buffer_ref(ctx, tbuf);
        else
            avfilter_unref_buffer(tbuf);
        return ret;
    }

    *buf = tbuf;
    return 0;
}
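
/* Illustrative fixed-size audio pull (added comment; "asink_ctx" and the 1152
 * sample count are assumptions for the example, e.g. an MP2-style frame
 * size).  av_buffersink_read_samples() stitches FIFO entries together so each
 * returned buffer carries exactly nb_samples samples, except possibly the
 * last one before EOF.
 *
 *     AVFilterBufferRef *samples = NULL;
 *     int ret;
 *     while ((ret = av_buffersink_read_samples(asink_ctx, &samples, 1152)) >= 0) {
 *         // ... encode or otherwise process samples->extended_data ...
 *         avfilter_unref_buffer(samples);
 *     }
 */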