You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

558 lines
17KB

  1. /*
  2. * Copyright (c) 2011 Stefano Sabatini
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * buffer sink
  23. */
  24. #include "libavutil/audio_fifo.h"
  25. #include "libavutil/avassert.h"
  26. #include "libavutil/channel_layout.h"
  27. #include "libavutil/common.h"
  28. #include "libavutil/mathematics.h"
  29. #include "audio.h"
  30. #include "avfilter.h"
  31. #include "buffersink.h"
  32. #include "internal.h"
typedef struct {
    AVFifoBuffer *fifo;                      ///< FIFO of AVFrame pointers queued by filter_frame()
    unsigned warning_limit;                  ///< queue depth that triggers a warning; multiplied by 10 after each warning

    /* only used for video */
    enum AVPixelFormat *pixel_fmts;          ///< list of accepted pixel formats, must be terminated with -1

    /* only used for audio */
    enum AVSampleFormat *sample_fmts;        ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
    int64_t *channel_layouts;                ///< list of accepted channel layouts, terminated by -1
    int all_channel_counts;                  ///< if set, accept any channel count (see asink_query_formats())
    int *sample_rates;                       ///< list of accepted sample rates, terminated by -1

    /* only used for compat API */
    AVAudioFifo *audio_fifo;                 ///< FIFO for audio samples, used to re-chunk frames in av_buffersink_get_samples()
    int64_t next_pts;                        ///< interpolated pts of the next audio frame read from audio_fifo
} BufferSinkContext;
  47. static av_cold void uninit(AVFilterContext *ctx)
  48. {
  49. BufferSinkContext *sink = ctx->priv;
  50. AVFrame *frame;
  51. if (sink->audio_fifo)
  52. av_audio_fifo_free(sink->audio_fifo);
  53. if (sink->fifo) {
  54. while (av_fifo_size(sink->fifo) >= sizeof(AVFilterBufferRef *)) {
  55. av_fifo_generic_read(sink->fifo, &frame, sizeof(frame), NULL);
  56. av_frame_unref(frame);
  57. }
  58. av_fifo_free(sink->fifo);
  59. sink->fifo = NULL;
  60. }
  61. av_freep(&sink->pixel_fmts);
  62. av_freep(&sink->sample_fmts);
  63. av_freep(&sink->sample_rates);
  64. av_freep(&sink->channel_layouts);
  65. }
  66. static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref)
  67. {
  68. BufferSinkContext *buf = ctx->priv;
  69. if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
  70. /* realloc fifo size */
  71. if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
  72. av_log(ctx, AV_LOG_ERROR,
  73. "Cannot buffer more frames. Consume some available frames "
  74. "before adding new ones.\n");
  75. return AVERROR(ENOMEM);
  76. }
  77. }
  78. /* cache frame */
  79. av_fifo_generic_write(buf->fifo, &ref, sizeof(AVFilterBufferRef *), NULL);
  80. return 0;
  81. }
  82. static int filter_frame(AVFilterLink *link, AVFrame *frame)
  83. {
  84. AVFilterContext *ctx = link->dst;
  85. BufferSinkContext *buf = link->dst->priv;
  86. int ret;
  87. if ((ret = add_buffer_ref(ctx, frame)) < 0)
  88. return ret;
  89. if (buf->warning_limit &&
  90. av_fifo_size(buf->fifo) / sizeof(AVFilterBufferRef *) >= buf->warning_limit) {
  91. av_log(ctx, AV_LOG_WARNING,
  92. "%d buffers queued in %s, something may be wrong.\n",
  93. buf->warning_limit,
  94. (char *)av_x_if_null(ctx->name, ctx->filter->name));
  95. buf->warning_limit *= 10;
  96. }
  97. return 0;
  98. }
  99. int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
  100. {
  101. return av_buffersink_get_frame_flags(ctx, frame, 0);
  102. }
  103. int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
  104. {
  105. BufferSinkContext *buf = ctx->priv;
  106. AVFilterLink *inlink = ctx->inputs[0];
  107. int ret;
  108. AVFrame *cur_frame;
  109. /* no picref available, fetch it from the filterchain */
  110. if (!av_fifo_size(buf->fifo)) {
  111. if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
  112. return AVERROR(EAGAIN);
  113. if ((ret = ff_request_frame(inlink)) < 0)
  114. return ret;
  115. }
  116. if (!av_fifo_size(buf->fifo))
  117. return AVERROR(EINVAL);
  118. if (flags & AV_BUFFERSINK_FLAG_PEEK) {
  119. cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
  120. av_frame_ref(frame, cur_frame); /* TODO check failure */
  121. } else {
  122. av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL);
  123. av_frame_move_ref(frame, cur_frame);
  124. av_frame_free(&cur_frame);
  125. }
  126. return 0;
  127. }
  128. static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
  129. int nb_samples)
  130. {
  131. BufferSinkContext *s = ctx->priv;
  132. AVFilterLink *link = ctx->inputs[0];
  133. AVFrame *tmp;
  134. if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
  135. return AVERROR(ENOMEM);
  136. av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);
  137. tmp->pts = s->next_pts;
  138. s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
  139. link->time_base);
  140. av_frame_move_ref(frame, tmp);
  141. av_frame_free(&tmp);
  142. return 0;
  143. }
/**
 * Fetch exactly nb_samples audio samples, re-chunking the frames delivered by
 * the filterchain through an internal AVAudioFifo.  At EOF the remaining
 * (possibly shorter) tail is returned.  Output pts values are interpolated in
 * BufferSinkContext.next_pts from the last input frame that carried a pts.
 */
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
{
    BufferSinkContext *s = ctx->priv;
    AVFilterLink *link = ctx->inputs[0];
    AVFrame *cur_frame;
    int ret = 0;

    /* lazily allocate the re-chunking FIFO on first use */
    if (!s->audio_fifo) {
        int nb_channels = link->channels;
        if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples)))
            return AVERROR(ENOMEM);
    }

    while (ret >= 0) {
        /* enough buffered: emit exactly nb_samples and stop */
        if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
            return read_from_fifo(ctx, frame, nb_samples);

        if (!(cur_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
        ret = av_buffersink_get_frame_flags(ctx, cur_frame, 0);
        if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo)) {
            /* chain drained: flush whatever is left, even if < nb_samples */
            av_frame_free(&cur_frame);
            return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
        } else if (ret < 0) {
            av_frame_free(&cur_frame);
            return ret;
        }

        /* re-anchor the interpolated pts on this frame's pts, backed off by
         * the duration of the samples still sitting in the FIFO ahead of it */
        if (cur_frame->pts != AV_NOPTS_VALUE) {
            s->next_pts = cur_frame->pts -
                          av_rescale_q(av_audio_fifo_size(s->audio_fifo),
                                       (AVRational){ 1, link->sample_rate },
                                       link->time_base);
        }

        ret = av_audio_fifo_write(s->audio_fifo, (void**)cur_frame->extended_data,
                                  cur_frame->nb_samples);
        av_frame_free(&cur_frame);
    }

    return ret;
}
  180. AVBufferSinkParams *av_buffersink_params_alloc(void)
  181. {
  182. static const int pixel_fmts[] = { AV_PIX_FMT_NONE };
  183. AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
  184. if (!params)
  185. return NULL;
  186. params->pixel_fmts = pixel_fmts;
  187. return params;
  188. }
  189. AVABufferSinkParams *av_abuffersink_params_alloc(void)
  190. {
  191. AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams));
  192. if (!params)
  193. return NULL;
  194. return params;
  195. }
  196. #define FIFO_INIT_SIZE 8
  197. static av_cold int common_init(AVFilterContext *ctx)
  198. {
  199. BufferSinkContext *buf = ctx->priv;
  200. buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
  201. if (!buf->fifo) {
  202. av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
  203. return AVERROR(ENOMEM);
  204. }
  205. buf->warning_limit = 100;
  206. return 0;
  207. }
  208. void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
  209. {
  210. AVFilterLink *inlink = ctx->inputs[0];
  211. inlink->min_samples = inlink->max_samples =
  212. inlink->partial_buf_size = frame_size;
  213. }
  214. #if FF_API_AVFILTERBUFFER
  215. static void compat_free_buffer(AVFilterBuffer *buf)
  216. {
  217. AVFrame *frame = buf->priv;
  218. av_frame_free(&frame);
  219. av_free(buf);
  220. }
/**
 * Compatibility core for the deprecated AVFilterBufferRef API: fetch a frame
 * (or nb_samples audio samples) via the modern API and wrap the result in a
 * legacy AVFilterBufferRef.  The wrapped AVFrame is owned by the buffer and
 * released by compat_free_buffer().
 *
 * @param pbuf       receives the new buffer ref; if NULL, only polls the input
 * @param nb_samples if non-zero, read exactly that many audio samples
 * @param flags      forwarded to av_buffersink_get_frame_flags()
 */
static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags)
{
    AVFilterBufferRef *buf;
    AVFrame *frame;
    int ret;

    /* NULL pbuf means "how many frames are available?" */
    if (!pbuf)
        return ff_poll_frame(ctx->inputs[0]);

    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    if (!nb_samples)
        ret = av_buffersink_get_frame_flags(ctx, frame, flags);
    else
        ret = av_buffersink_get_samples(ctx, frame, nb_samples);
    if (ret < 0)
        goto fail;

    /* wrap the frame's data planes in a legacy buffer ref of the right type */
    if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
        buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
                                                        AV_PERM_READ,
                                                        frame->width, frame->height,
                                                        frame->format);
    } else {
        buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
                                                        frame->linesize[0], AV_PERM_READ,
                                                        frame->nb_samples,
                                                        frame->format,
                                                        frame->channel_layout);
    }
    if (!buf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    avfilter_copy_frame_props(buf, frame);

    /* hand frame ownership to the buffer; freed in compat_free_buffer() */
    buf->buf->priv = frame;
    buf->buf->free = compat_free_buffer;

    *pbuf = buf;

    return 0;
fail:
    av_frame_free(&frame);
    return ret;
}
  262. int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
  263. {
  264. return compat_read(ctx, buf, 0, 0);
  265. }
  266. int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
  267. int nb_samples)
  268. {
  269. return compat_read(ctx, buf, nb_samples, 0);
  270. }
  271. int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
  272. AVFilterBufferRef **bufref, int flags)
  273. {
  274. *bufref = NULL;
  275. av_assert0( !strcmp(ctx->filter->name, "buffersink")
  276. || !strcmp(ctx->filter->name, "abuffersink")
  277. || !strcmp(ctx->filter->name, "ffbuffersink")
  278. || !strcmp(ctx->filter->name, "ffabuffersink"));
  279. return compat_read(ctx, bufref, 0, flags);
  280. }
  281. #endif
  282. AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
  283. {
  284. av_assert0( !strcmp(ctx->filter->name, "buffersink")
  285. || !strcmp(ctx->filter->name, "ffbuffersink"));
  286. return ctx->inputs[0]->frame_rate;
  287. }
  288. int av_buffersink_poll_frame(AVFilterContext *ctx)
  289. {
  290. BufferSinkContext *buf = ctx->priv;
  291. AVFilterLink *inlink = ctx->inputs[0];
  292. av_assert0( !strcmp(ctx->filter->name, "buffersink")
  293. || !strcmp(ctx->filter->name, "abuffersink")
  294. || !strcmp(ctx->filter->name, "ffbuffersink")
  295. || !strcmp(ctx->filter->name, "ffabuffersink"));
  296. return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink);
  297. }
  298. static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
  299. {
  300. BufferSinkContext *buf = ctx->priv;
  301. AVBufferSinkParams *params = opaque;
  302. if (params && params->pixel_fmts) {
  303. const int *pixel_fmts = params->pixel_fmts;
  304. buf->pixel_fmts = ff_copy_int_list(pixel_fmts);
  305. if (!buf->pixel_fmts)
  306. return AVERROR(ENOMEM);
  307. }
  308. return common_init(ctx);
  309. }
  310. static int vsink_query_formats(AVFilterContext *ctx)
  311. {
  312. BufferSinkContext *buf = ctx->priv;
  313. if (buf->pixel_fmts)
  314. ff_set_common_formats(ctx, ff_make_format_list(buf->pixel_fmts));
  315. else
  316. ff_default_query_formats(ctx);
  317. return 0;
  318. }
  319. static int64_t *concat_channels_lists(const int64_t *layouts, const int *counts)
  320. {
  321. int nb_layouts = 0, nb_counts = 0, i;
  322. int64_t *list;
  323. if (layouts)
  324. for (; layouts[nb_layouts] != -1; nb_layouts++);
  325. if (counts)
  326. for (; counts[nb_counts] != -1; nb_counts++);
  327. if (nb_counts > INT_MAX - 1 - nb_layouts)
  328. return NULL;
  329. if (!(list = av_calloc(nb_layouts + nb_counts + 1, sizeof(*list))))
  330. return NULL;
  331. for (i = 0; i < nb_layouts; i++)
  332. list[i] = layouts[i];
  333. for (i = 0; i < nb_counts; i++)
  334. list[nb_layouts + i] = FF_COUNT2LAYOUT(counts[i]);
  335. list[nb_layouts + nb_counts] = -1;
  336. return list;
  337. }
  338. static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
  339. {
  340. BufferSinkContext *buf = ctx->priv;
  341. AVABufferSinkParams *params = opaque;
  342. if (params && params->sample_fmts) {
  343. buf->sample_fmts = ff_copy_int_list(params->sample_fmts);
  344. if (!buf->sample_fmts)
  345. return AVERROR(ENOMEM);
  346. }
  347. if (params && params->sample_rates) {
  348. buf->sample_rates = ff_copy_int_list(params->sample_rates);
  349. if (!buf->sample_rates)
  350. return AVERROR(ENOMEM);
  351. }
  352. if (params && (params->channel_layouts || params->channel_counts)) {
  353. if (params->all_channel_counts) {
  354. av_log(ctx, AV_LOG_ERROR,
  355. "Conflicting all_channel_counts and list in parameters\n");
  356. return AVERROR(EINVAL);
  357. }
  358. buf->channel_layouts = concat_channels_lists(params->channel_layouts,
  359. params->channel_counts);
  360. if (!buf->channel_layouts)
  361. return AVERROR(ENOMEM);
  362. }
  363. if (params)
  364. buf->all_channel_counts = params->all_channel_counts;
  365. return common_init(ctx);
  366. }
  367. static int asink_query_formats(AVFilterContext *ctx)
  368. {
  369. BufferSinkContext *buf = ctx->priv;
  370. AVFilterFormats *formats = NULL;
  371. AVFilterChannelLayouts *layouts = NULL;
  372. if (buf->sample_fmts) {
  373. if (!(formats = ff_make_format_list(buf->sample_fmts)))
  374. return AVERROR(ENOMEM);
  375. ff_set_common_formats(ctx, formats);
  376. }
  377. if (buf->channel_layouts || buf->all_channel_counts) {
  378. layouts = buf->all_channel_counts ? ff_all_channel_counts() :
  379. avfilter_make_format64_list(buf->channel_layouts);
  380. if (!layouts)
  381. return AVERROR(ENOMEM);
  382. ff_set_common_channel_layouts(ctx, layouts);
  383. }
  384. if (buf->sample_rates) {
  385. formats = ff_make_format_list(buf->sample_rates);
  386. if (!formats)
  387. return AVERROR(ENOMEM);
  388. ff_set_common_samplerates(ctx, formats);
  389. }
  390. return 0;
  391. }
#if FF_API_AVFILTERBUFFER

/* Legacy "ff"-prefixed sink variants, only built while the deprecated
 * AVFilterBuffer API (FF_API_AVFILTERBUFFER) is enabled.  They share all
 * callbacks with the modern buffersink/abuffersink below. */

static const AVFilterPad ffbuffersink_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL },
};

AVFilter avfilter_vsink_ffbuffersink = {
    .name = "ffbuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size = sizeof(BufferSinkContext),
    .init_opaque = vsink_init,
    .uninit = uninit,
    .query_formats = vsink_query_formats,
    .inputs = ffbuffersink_inputs,
    .outputs = NULL, ///< sinks have no output pads
};

static const AVFilterPad ffabuffersink_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL },
};

AVFilter avfilter_asink_ffabuffersink = {
    .name = "ffabuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .init_opaque = asink_init,
    .uninit = uninit,
    .priv_size = sizeof(BufferSinkContext),
    .query_formats = asink_query_formats,
    .inputs = ffabuffersink_inputs,
    .outputs = NULL, ///< sinks have no output pads
};
#endif /* FF_API_AVFILTERBUFFER */
/* Current buffersink/abuffersink filter definitions. */

static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter avfilter_vsink_buffer = {
    .name = "buffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size = sizeof(BufferSinkContext),
    .init_opaque = vsink_init,
    .uninit = uninit,
    .query_formats = vsink_query_formats,
    .inputs = avfilter_vsink_buffer_inputs,
    .outputs = NULL, ///< sinks have no output pads
};

static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter avfilter_asink_abuffer = {
    .name = "abuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .priv_size = sizeof(BufferSinkContext),
    .init_opaque = asink_init,
    .uninit = uninit,
    .query_formats = asink_query_formats,
    .inputs = avfilter_asink_abuffer_inputs,
    .outputs = NULL, ///< sinks have no output pads
};