/*
 * Copyright (c) 2011 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * buffer sink
 */

#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"

#include "audio.h"
#include "avfilter.h"
#include "buffersink.h"
#include "internal.h"

typedef struct {
    AVFifoBuffer *fifo;                 ///< FIFO buffer of video frame references
    unsigned warning_limit;

    /* only used for video */
    enum AVPixelFormat *pixel_fmts;     ///< list of accepted pixel formats, must be terminated with -1
    /* only used for audio */
    enum AVSampleFormat *sample_fmts;   ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
    int64_t *channel_layouts;           ///< list of accepted channel layouts, terminated by -1
    int all_channel_counts;
    int *sample_rates;                  ///< list of accepted sample rates, terminated by -1

    /* only used for compat API */
    AVAudioFifo *audio_fifo;            ///< FIFO for audio samples
    int64_t next_pts;                   ///< interpolating audio pts
} BufferSinkContext;

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSinkContext *sink = ctx->priv;
    AVFrame *frame;

    if (sink->audio_fifo)
        av_audio_fifo_free(sink->audio_fifo);

    if (sink->fifo) {
        while (av_fifo_size(sink->fifo) >= sizeof(AVFilterBufferRef *)) {
            av_fifo_generic_read(sink->fifo, &frame, sizeof(frame), NULL);
            av_frame_free(&frame);
        }
        av_fifo_free(sink->fifo);
        sink->fifo = NULL;
    }
    av_freep(&sink->pixel_fmts);
    av_freep(&sink->sample_fmts);
    av_freep(&sink->sample_rates);
    av_freep(&sink->channel_layouts);
}

static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref)
{
    BufferSinkContext *buf = ctx->priv;

    if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
        /* realloc fifo size */
        if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Cannot buffer more frames. Consume some available frames "
                   "before adding new ones.\n");
            return AVERROR(ENOMEM);
        }
    }

    /* cache frame */
    av_fifo_generic_write(buf->fifo, &ref, sizeof(AVFilterBufferRef *), NULL);
    return 0;
}

static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    BufferSinkContext *buf = link->dst->priv;
    int ret;

    if ((ret = add_buffer_ref(ctx, frame)) < 0)
        return ret;
    if (buf->warning_limit &&
        av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) {
        av_log(ctx, AV_LOG_WARNING,
               "%d buffers queued in %s, something may be wrong.\n",
               buf->warning_limit,
               (char *)av_x_if_null(ctx->name, ctx->filter->name));
        buf->warning_limit *= 10;
    }
    return 0;
}
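
/*
 * Typical use of the frame-returning API below is a pull loop once the graph
 * has been fed. A minimal sketch, assuming the graph is already configured
 * and "sink_ctx" is the sink instance; process_frame() is a placeholder for
 * the caller's consumer, not something defined in this file:
 *
 *     AVFrame *frame = av_frame_alloc();
 *     int ret;
 *
 *     if (!frame)
 *         return AVERROR(ENOMEM);
 *     while ((ret = av_buffersink_get_frame(sink_ctx, frame)) >= 0) {
 *         process_frame(frame);   // use the data, then drop our reference
 *         av_frame_unref(frame);
 *     }
 *     av_frame_free(&frame);
 *     // ret is AVERROR_EOF once the input is exhausted, or another
 *     // AVERROR code on failure.
 */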
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
    return av_buffersink_get_frame_flags(ctx, frame, 0);
}

int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret;
    AVFrame *cur_frame;

    /* no picref available, fetch it from the filterchain */
    if (!av_fifo_size(buf->fifo)) {
        if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
            return AVERROR(EAGAIN);
        if ((ret = ff_request_frame(inlink)) < 0)
            return ret;
    }

    if (!av_fifo_size(buf->fifo))
        return AVERROR(EINVAL);

    if (flags & AV_BUFFERSINK_FLAG_PEEK) {
        cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
        if ((ret = av_frame_ref(frame, cur_frame)) < 0)
            return ret;
    } else {
        av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL);
        av_frame_move_ref(frame, cur_frame);
        av_frame_free(&cur_frame);
    }

    return 0;
}

static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
                          int nb_samples)
{
    BufferSinkContext *s = ctx->priv;
    AVFilterLink   *link = ctx->inputs[0];
    AVFrame *tmp;

    if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
        return AVERROR(ENOMEM);
    av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);

    tmp->pts = s->next_pts;
    s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
                                link->time_base);

    av_frame_move_ref(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}
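
/*
 * A minimal sketch of pulling fixed-size chunks with av_buffersink_get_samples(),
 * e.g. to feed an encoder that needs a constant number of samples per frame.
 * "sink_ctx", "frame_size" and encode_audio() are placeholder names assumed by
 * this sketch, not defined in this file:
 *
 *     AVFrame *frame = av_frame_alloc();
 *     int ret;
 *
 *     if (!frame)
 *         return AVERROR(ENOMEM);
 *     while ((ret = av_buffersink_get_samples(sink_ctx, frame, frame_size)) >= 0) {
 *         encode_audio(frame);   // frame->nb_samples == frame_size, except
 *         av_frame_unref(frame); // possibly for the final chunk at EOF
 *     }
 *     av_frame_free(&frame);
 */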
int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
{
    BufferSinkContext *s = ctx->priv;
    AVFilterLink   *link = ctx->inputs[0];
    AVFrame *cur_frame;
    int ret = 0;

    if (!s->audio_fifo) {
        int nb_channels = link->channels;
        if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples)))
            return AVERROR(ENOMEM);
    }

    while (ret >= 0) {
        if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
            return read_from_fifo(ctx, frame, nb_samples);

        if (!(cur_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
        ret = av_buffersink_get_frame_flags(ctx, cur_frame, 0);
        if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo)) {
            av_frame_free(&cur_frame);
            return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
        } else if (ret < 0) {
            av_frame_free(&cur_frame);
            return ret;
        }

        if (cur_frame->pts != AV_NOPTS_VALUE) {
            s->next_pts = cur_frame->pts -
                          av_rescale_q(av_audio_fifo_size(s->audio_fifo),
                                       (AVRational){ 1, link->sample_rate },
                                       link->time_base);
        }

        ret = av_audio_fifo_write(s->audio_fifo, (void**)cur_frame->extended_data,
                                  cur_frame->nb_samples);
        av_frame_free(&cur_frame);
    }

    return ret;
}

AVBufferSinkParams *av_buffersink_params_alloc(void)
{
    static const int pixel_fmts[] = { AV_PIX_FMT_NONE };
    AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
    if (!params)
        return NULL;

    params->pixel_fmts = pixel_fmts;
    return params;
}

AVABufferSinkParams *av_abuffersink_params_alloc(void)
{
    AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams));

    if (!params)
        return NULL;
    return params;
}

#define FIFO_INIT_SIZE 8

static av_cold int common_init(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;

    buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
    if (!buf->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
        return AVERROR(ENOMEM);
    }
    buf->warning_limit = 100;
    return 0;
}
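
/*
 * Sketched use of the setter below, assuming a fixed-frame-size audio encoder
 * context "enc_ctx" and a sink instance "sink_ctx" (placeholder names): after
 * the graph is configured, make the sink deliver matching chunk sizes:
 *
 *     av_buffersink_set_frame_size(sink_ctx, enc_ctx->frame_size);
 */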
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
{
    AVFilterLink *inlink = ctx->inputs[0];

    inlink->min_samples = inlink->max_samples =
    inlink->partial_buf_size = frame_size;
}

#if FF_API_AVFILTERBUFFER
static void compat_free_buffer(AVFilterBuffer *buf)
{
    AVFrame *frame = buf->priv;
    av_frame_free(&frame);
    av_free(buf);
}
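
/* Compatibility path for the deprecated AVFilterBufferRef API: pull an AVFrame
 * through the regular accessors above and wrap it in a buffer reference. */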
static int attribute_align_arg compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags)
{
    AVFilterBufferRef *buf;
    AVFrame *frame;
    int ret;

    if (!pbuf)
        return ff_poll_frame(ctx->inputs[0]);

    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    if (!nb_samples)
        ret = av_buffersink_get_frame_flags(ctx, frame, flags);
    else
        ret = av_buffersink_get_samples(ctx, frame, nb_samples);

    if (ret < 0)
        goto fail;

    if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
        buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
                                                        AV_PERM_READ,
                                                        frame->width, frame->height,
                                                        frame->format);
    } else {
        buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
                                                        frame->linesize[0], AV_PERM_READ,
                                                        frame->nb_samples,
                                                        frame->format,
                                                        frame->channel_layout);
    }
    if (!buf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    avfilter_copy_frame_props(buf, frame);

    buf->buf->priv = frame;
    buf->buf->free = compat_free_buffer;

    *pbuf = buf;

    return 0;
fail:
    av_frame_free(&frame);
    return ret;
}

int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
    return compat_read(ctx, buf, 0, 0);
}

int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
                               int nb_samples)
{
    return compat_read(ctx, buf, nb_samples, 0);
}

int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
                                 AVFilterBufferRef **bufref, int flags)
{
    *bufref = NULL;

    av_assert0(   !strcmp(ctx->filter->name, "buffersink")
               || !strcmp(ctx->filter->name, "abuffersink")
               || !strcmp(ctx->filter->name, "ffbuffersink")
               || !strcmp(ctx->filter->name, "ffabuffersink"));

    return compat_read(ctx, bufref, 0, flags);
}
#endif

AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
{
    av_assert0(   !strcmp(ctx->filter->name, "buffersink")
               || !strcmp(ctx->filter->name, "ffbuffersink"));

    return ctx->inputs[0]->frame_rate;
}

int attribute_align_arg av_buffersink_poll_frame(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    av_assert0(   !strcmp(ctx->filter->name, "buffersink")
               || !strcmp(ctx->filter->name, "abuffersink")
               || !strcmp(ctx->filter->name, "ffbuffersink")
               || !strcmp(ctx->filter->name, "ffabuffersink"));

    return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink);
}

static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSinkContext *buf = ctx->priv;
    AVBufferSinkParams *params = opaque;

    if (params && params->pixel_fmts) {
        const int *pixel_fmts = params->pixel_fmts;

        buf->pixel_fmts = ff_copy_int_list(pixel_fmts);
        if (!buf->pixel_fmts)
            return AVERROR(ENOMEM);
    }

    return common_init(ctx);
}

static int vsink_query_formats(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;

    if (buf->pixel_fmts)
        ff_set_common_formats(ctx, ff_make_format_list(buf->pixel_fmts));
    else
        ff_default_query_formats(ctx);

    return 0;
}
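
/* Merge an explicit channel-layout list and a channel-count list into a single
 * -1-terminated list, encoding the counts with FF_COUNT2LAYOUT(). */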
static int64_t *concat_channels_lists(const int64_t *layouts, const int *counts)
{
    int nb_layouts = 0, nb_counts = 0, i;
    int64_t *list;

    if (layouts)
        for (; layouts[nb_layouts] != -1; nb_layouts++);
    if (counts)
        for (; counts[nb_counts] != -1; nb_counts++);
    if (nb_counts > INT_MAX - 1 - nb_layouts)
        return NULL;
    if (!(list = av_calloc(nb_layouts + nb_counts + 1, sizeof(*list))))
        return NULL;
    for (i = 0; i < nb_layouts; i++)
        list[i] = layouts[i];
    for (i = 0; i < nb_counts; i++)
        list[nb_layouts + i] = FF_COUNT2LAYOUT(counts[i]);
    list[nb_layouts + nb_counts] = -1;
    return list;
}

static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSinkContext *buf = ctx->priv;
    AVABufferSinkParams *params = opaque;

    if (params && params->sample_fmts) {
        buf->sample_fmts = ff_copy_int_list(params->sample_fmts);
        if (!buf->sample_fmts)
            return AVERROR(ENOMEM);
    }
    if (params && params->sample_rates) {
        buf->sample_rates = ff_copy_int_list(params->sample_rates);
        if (!buf->sample_rates)
            return AVERROR(ENOMEM);
    }
    if (params && (params->channel_layouts || params->channel_counts)) {
        if (params->all_channel_counts) {
            av_log(ctx, AV_LOG_ERROR,
                   "Conflicting all_channel_counts and list in parameters\n");
            return AVERROR(EINVAL);
        }
        buf->channel_layouts = concat_channels_lists(params->channel_layouts,
                                                     params->channel_counts);
        if (!buf->channel_layouts)
            return AVERROR(ENOMEM);
    }
    if (params)
        buf->all_channel_counts = params->all_channel_counts;
    return common_init(ctx);
}

static int asink_query_formats(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;

    if (buf->sample_fmts) {
        if (!(formats = ff_make_format_list(buf->sample_fmts)))
            return AVERROR(ENOMEM);
        ff_set_common_formats(ctx, formats);
    }

    if (buf->channel_layouts || buf->all_channel_counts) {
        layouts = buf->all_channel_counts ? ff_all_channel_counts() :
                  avfilter_make_format64_list(buf->channel_layouts);
        if (!layouts)
            return AVERROR(ENOMEM);
        ff_set_common_channel_layouts(ctx, layouts);
    }

    if (buf->sample_rates) {
        formats = ff_make_format_list(buf->sample_rates);
        if (!formats)
            return AVERROR(ENOMEM);
        ff_set_common_samplerates(ctx, formats);
    }

    return 0;
}

#if FF_API_AVFILTERBUFFER
static const AVFilterPad ffbuffersink_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL },
};

AVFilter avfilter_vsink_ffbuffersink = {
    .name          = "ffbuffersink",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size     = sizeof(BufferSinkContext),
    .init_opaque   = vsink_init,
    .uninit        = uninit,
    .query_formats = vsink_query_formats,
    .inputs        = ffbuffersink_inputs,
    .outputs       = NULL,
};

static const AVFilterPad ffabuffersink_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL },
};

AVFilter avfilter_asink_ffabuffersink = {
    .name          = "ffabuffersink",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .init_opaque   = asink_init,
    .uninit        = uninit,
    .priv_size     = sizeof(BufferSinkContext),
    .query_formats = asink_query_formats,
    .inputs        = ffabuffersink_inputs,
    .outputs       = NULL,
};
#endif /* FF_API_AVFILTERBUFFER */

static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter avfilter_vsink_buffer = {
    .name          = "buffersink",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size     = sizeof(BufferSinkContext),
    .init_opaque   = vsink_init,
    .uninit        = uninit,
    .query_formats = vsink_query_formats,
    .inputs        = avfilter_vsink_buffer_inputs,
    .outputs       = NULL,
};

static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter avfilter_asink_abuffer = {
    .name          = "abuffersink",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .priv_size     = sizeof(BufferSinkContext),
    .init_opaque   = asink_init,
    .uninit        = uninit,
    .query_formats = asink_query_formats,
    .inputs        = avfilter_asink_abuffer_inputs,
    .outputs       = NULL,
};