/*
 * Copyright (c) 2008 Vitor Sessak
 * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 * Copyright (c) 2011 Mina Nagy Zaki
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
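
/*
 * This file implements both the "buffer" video source and the "abuffer"
 * audio source: buffer references pushed in by the application are queued
 * in a FIFO and handed to the rest of the filtergraph on request_frame().
 */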
#include "avfilter.h"
#include "internal.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "asrc_abuffer.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avstring.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"

typedef struct {
    AVFifoBuffer     *fifo;
    AVRational        time_base;          ///< time_base to set in the output link
    int               eof;
    unsigned          nb_failed_requests;

    /* Video only */
    AVFilterContext  *scale;
    int               h, w;
    enum PixelFormat  pix_fmt;
    AVRational        sample_aspect_ratio;
    char              sws_param[256];

    /* Audio only */
    // Audio format of incoming buffers
    int               sample_rate;
    unsigned int      sample_format;
    int64_t           channel_layout;
    int               packing_format;

    // Normalization filters
    AVFilterContext  *aconvert;
    AVFilterContext  *aresample;
} BufferSourceContext;

#define FIFO_SIZE 8

#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }
static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name);
static void remove_filter(AVFilterContext **filt_ctx);
static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx);

static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
{
    char old_layout_str[16], new_layout_str[16];

    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
                                 -1, link->channel_layout);
    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
                                 -1, ref->audio->channel_layout);
    av_log(ctx, AV_LOG_INFO,
           "Audio input format changed: "
           "%s:%s:%d -> %s:%s:%d, normalizing\n",
           av_get_sample_fmt_name(link->format),
           old_layout_str, (int)link->sample_rate,
           av_get_sample_fmt_name(ref->format),
           new_layout_str, ref->audio->sample_rate);
}
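
/**
 * Handle a change in the incoming video frame properties: if size or pixel
 * format differ from what the source was configured with, insert (or
 * reinitialize) a scale filter right after the buffer source so that
 * downstream filters keep seeing the originally negotiated parameters.
 */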
static int check_format_change_video(AVFilterContext *buffer_filter,
                                     AVFilterBufferRef *picref)
{
    BufferSourceContext *c = buffer_filter->priv;
    int ret;

    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
        AVFilterLink *link;
        char scale_param[1024];

        av_log(buffer_filter, AV_LOG_INFO,
               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);

        if (!scale || strcmp(scale->filter->name, "scale")) {
            AVFilter *f = avfilter_get_by_name("scale");

            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
                return ret;
            c->scale = scale;

            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
                return ret;
            }

            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
                return ret;
            }
            scale->outputs[0]->time_base = scale->inputs[0]->time_base;

            scale->outputs[0]->format = c->pix_fmt;
        } else if (!strcmp(scale->filter->name, "scale")) {
            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
            scale->filter->init(scale, scale_param, NULL);
        }

        c->pix_fmt = scale->inputs[0]->format = picref->format;
        c->w       = scale->inputs[0]->w      = picref->video->w;
        c->h       = scale->inputs[0]->h      = picref->video->h;

        link = scale->outputs[0];
        if ((ret = link->srcpad->config_props(link)) < 0)
            return ret;
    }
    return 0;
}
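
/**
 * Handle a change in the incoming audio format: insert, reconfigure or
 * remove an aresample filter when the sample rate changes, and an aconvert
 * filter when the sample format, channel layout or packing changes, so that
 * the output link keeps its negotiated parameters.
 */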
static int check_format_change_audio(AVFilterContext *ctx,
                                     AVFilterBufferRef *samplesref)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterLink *link;
    int ret, logged = 0;

    link = ctx->outputs[0];
    if (samplesref->audio->sample_rate != link->sample_rate) {

        log_input_change(ctx, link, samplesref);
        logged = 1;

        abuffer->sample_rate = samplesref->audio->sample_rate;

        if (!abuffer->aresample) {
            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aresample->outputs[0];
            if (samplesref->audio->sample_rate == link->sample_rate)
                remove_filter(&abuffer->aresample);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
                    return ret;
        }
    }

    link = ctx->outputs[0];
    if (samplesref->format                != link->format         ||
        samplesref->audio->channel_layout != link->channel_layout ||
        samplesref->audio->planar         != link->planar) {

        if (!logged) log_input_change(ctx, link, samplesref);

        abuffer->sample_format  = samplesref->format;
        abuffer->channel_layout = samplesref->audio->channel_layout;
        abuffer->packing_format = samplesref->audio->planar;

        if (!abuffer->aconvert) {
            ret = insert_filter(abuffer, link, &abuffer->aconvert, "aconvert");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aconvert->outputs[0];
            if (samplesref->format                == link->format         &&
                samplesref->audio->channel_layout == link->channel_layout &&
                samplesref->audio->planar         == link->planar)
                remove_filter(&abuffer->aconvert);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aconvert)) < 0)
                    return ret;
        }
    }

    return 0;
}
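
/**
 * Dispatch to the video or audio format-change handler depending on the
 * media type of the output link.
 */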
static int check_format_change(AVFilterContext *buffer_filter,
                               AVFilterBufferRef *picref)
{
    switch (buffer_filter->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        return check_format_change_video(buffer_filter, picref);
    case AVMEDIA_TYPE_AUDIO:
        return check_format_change_audio(buffer_filter, picref);
    default:
        return AVERROR(ENOSYS);
    }
}
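
/**
 * Make a writable copy of the incoming buffer reference in a buffer
 * allocated on the output link, so the filter owns the data it queues.
 */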
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels, data_size, i;

    switch (outlink->type) {

    case AVMEDIA_TYPE_VIDEO:
        buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                        ref->video->w, ref->video->h);
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE,
                                        ref->audio->nb_samples);
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        data_size = av_samples_get_buffer_size(NULL, channels,
                                               ref->audio->nb_samples,
                                               ref->format, 1);
        for (i = 0; i < FF_ARRAY_ELEMS(ref->buf->data) && ref->buf->data[i]; i++)
            memcpy(buf->buf->data[i], ref->buf->data[i], data_size);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}
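
/**
 * Queue a buffer reference on the source: by default the data is copied and
 * checked for format changes before being written to the FIFO; a NULL
 * reference marks end of stream.
 *
 * Illustrative sketch (not part of this file), assuming application code
 * that owns a configured buffer source instance `buffersrc_ctx` and a video
 * buffer reference `picref`:
 *
 *     if ((ret = av_buffersrc_add_ref(buffersrc_ctx, picref, 0)) < 0)
 *         return ret;                              // frame was not queued
 *     ...
 *     av_buffersrc_add_ref(buffersrc_ctx, NULL, 0);   // signal EOF
 */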
int av_buffersrc_add_ref(AVFilterContext *buffer_filter,
                         AVFilterBufferRef *picref, int flags)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterBufferRef *buf;
    int ret;

    if (!picref) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        ret = check_format_change(buffer_filter, picref);
        if (ret < 0)
            return ret;
    }
    if (flags & AV_BUFFERSRC_FLAG_NO_COPY)
        buf = picref;
    else
        buf = copy_buffer_ref(buffer_filter, picref);

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        if (buf != picref)
            avfilter_unref_buffer(buf);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}

int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    return av_buffersrc_add_ref(buffer_filter, picref, 0);
}

int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
                                        AV_BUFFERSRC_FLAG_NO_COPY);
}

#if CONFIG_AVCODEC
#include "avcodec.h"

int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
                             const AVFrame *frame, int flags)
{
    BufferSourceContext *c = buffer_src->priv;
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_vsrc_buffer_add_video_buffer_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}
#endif

unsigned av_vsrc_buffer_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
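
/**
 * Parse the init arguments of the video buffer source. The expected form is
 *
 *     width:height:pix_fmt:time_base.num:time_base.den:sar.num:sar.den[:sws_param]
 *
 * e.g. (hypothetical values) "320:240:yuv420p:1:25:1:1".
 */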
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int ret, n = 0;
    *c->sws_param = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
        return ret;

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den,
           c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param);
    return 0;
}
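
/**
 * Parse the init arguments of the audio buffer source. The expected form is
 *
 *     sample_rate:sample_fmt:channel_layout:packing
 *
 * e.g. (hypothetical values) "44100:s16:stereo:packed".
 */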
static av_cold int init_audio(AVFilterContext *ctx, const char *args0, void *opaque)
{
    BufferSourceContext *abuffer = ctx->priv;
    char *arg = NULL, *ptr, chlayout_str[16];
    char *args = av_strdup(args0);
    int ret;

    arg = av_strtok(args, ":", &ptr);

#define ADD_FORMAT(fmt_name)                                            \
    if (!arg)                                                           \
        goto arg_fail;                                                  \
    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) { \
        av_freep(&args);                                                \
        return ret;                                                     \
    }                                                                   \
    if (*args)                                                          \
        arg = av_strtok(NULL, ":", &ptr)

    ADD_FORMAT(sample_rate);
    ADD_FORMAT(sample_format);
    ADD_FORMAT(channel_layout);
    ADD_FORMAT(packing_format);

    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
    if (!abuffer->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
        return AVERROR(ENOMEM);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
                                 -1, abuffer->channel_layout);
    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
           abuffer->sample_rate);
    av_freep(&args);

    return 0;

arg_fail:
    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
                              "sample_rate:sample_fmt:channel_layout:packing\n");
    av_freep(&args);
    return AVERROR(EINVAL);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    avfilter_free(s->scale);
    s->scale = NULL;
}

static int query_formats_video(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int query_formats_audio(AVFilterContext *ctx)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterFormats *formats;

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_format);
    avfilter_set_common_sample_formats(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->channel_layout);
    avfilter_set_common_channel_layouts(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->packing_format);
    avfilter_set_common_packing_formats(ctx, formats);

    return 0;
}

static int config_output_video(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    link->w = c->w;
    link->h = c->h;
    link->sample_aspect_ratio = c->sample_aspect_ratio;
    link->time_base = c->time_base;

    return 0;
}

static int config_output_audio(AVFilterLink *outlink)
{
    BufferSourceContext *abuffer = outlink->src->priv;
    outlink->sample_rate = abuffer->sample_rate;
    return 0;
}
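
/**
 * Pop one queued buffer reference from the FIFO and push it downstream:
 * as a start_frame/draw_slice/end_frame sequence for video, or as a single
 * filter_samples() call for audio. Returns AVERROR_EOF once the queue is
 * empty and end of stream has been signalled, or AVERROR(EAGAIN) if no
 * buffer is currently queued.
 */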
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
        avfilter_draw_slice(link, 0, link->h, 1);
        avfilter_end_frame(link);
        avfilter_unref_buffer(buf);
        break;
    case AVMEDIA_TYPE_AUDIO:
        avfilter_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        avfilter_unref_buffer(buf);
        break;
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}

static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size/sizeof(AVFilterBufferRef*);
}
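
/* Free only the AVFilterBuffer structure itself: the data planes passed in
 * through av_asrc_buffer_add_samples() below are owned by the caller. */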
static void buf_free(AVFilterBuffer *ptr)
{
    av_free(ptr);
    return;
}
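
/* Helpers to splice the aresample/aconvert normalization filters in and out
 * of the graph right after the buffer source when the incoming audio format
 * changes. */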
static void set_link_source(AVFilterContext *src, AVFilterLink *link)
{
    link->src       = src;
    link->srcpad    = &(src->output_pads[0]);
    src->outputs[0] = link;
}

static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx)
{
    int ret;
    AVFilterLink * const inlink  = filt_ctx->inputs[0];
    AVFilterLink * const outlink = filt_ctx->outputs[0];

    inlink->format         = abuffer->sample_format;
    inlink->channel_layout = abuffer->channel_layout;
    inlink->planar         = abuffer->packing_format;
    inlink->sample_rate    = abuffer->sample_rate;

    filt_ctx->filter->uninit(filt_ctx);
    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
    if ((ret = filt_ctx->filter->init(filt_ctx, NULL, NULL)) < 0)
        return ret;
    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
        return ret;
    return outlink->srcpad->config_props(outlink);
}

static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
        return ret;

    link->src->outputs[0] = NULL;
    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
        link->src->outputs[0] = link;
        return ret;
    }

    set_link_source(*filt_ctx, link);

    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    return 0;
}

static void remove_filter(AVFilterContext **filt_ctx)
{
    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;

    (*filt_ctx)->outputs[0] = NULL;
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;

    set_link_source(src, outlink);
}

int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
                                        AVFilterBufferRef *samplesref,
                                        int av_unused flags)
{
    return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
}
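
/**
 * Queue audio samples given as separate plane pointers; since the buffer is
 * added with AV_BUFFERSRC_FLAG_NO_COPY and buf_free() only frees the
 * AVFilterBuffer structure, the caller's planes are referenced, not copied.
 *
 * Illustrative sketch (not part of this file), assuming caller-owned plane
 * arrays `data`/`linesize` holding `nb_samples` interleaved s16 stereo
 * samples at 44100 Hz with presentation timestamp `pts`:
 *
 *     av_asrc_buffer_add_samples(abuffersrc_ctx, data, linesize, nb_samples,
 *                                44100, AV_SAMPLE_FMT_S16,
 *                                AV_CH_LAYOUT_STEREO, 0, pts, 0);
 */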
int av_asrc_buffer_add_samples(AVFilterContext *ctx,
                               uint8_t *data[8], int linesize[8],
                               int nb_samples, int sample_rate,
                               int sample_fmt, int64_t channel_layout, int planar,
                               int64_t pts, int av_unused flags)
{
    AVFilterBufferRef *samplesref;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
                     data, linesize, AV_PERM_WRITE,
                     nb_samples,
                     sample_fmt, channel_layout, planar);
    if (!samplesref)
        return AVERROR(ENOMEM);

    samplesref->buf->free          = buf_free;
    samplesref->pts                = pts;
    samplesref->audio->sample_rate = sample_rate;

    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
}

int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
                              uint8_t *buf, int buf_size, int sample_rate,
                              int sample_fmt, int64_t channel_layout, int planar,
                              int64_t pts, int av_unused flags)
{
    uint8_t *data[8] = {0};
    int linesize[8];
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);

    av_samples_fill_arrays(data, linesize,
                           buf, nb_channels, nb_samples,
                           sample_fmt, 16);

    return av_asrc_buffer_add_samples(ctx,
                                      data, linesize, nb_samples,
                                      sample_rate,
                                      sample_fmt, channel_layout, planar,
                                      pts, flags);
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_video,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_VIDEO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_video, },
                                            { .name = NULL }},
};

#if CONFIG_ABUFFER_FILTER

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_audio,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_AUDIO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_audio, },
                                            { .name = NULL }},
};

#endif