/*
 * Copyright (c) 2008 Vitor Sessak
 * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 * Copyright (c) 2011 Mina Nagy Zaki
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
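
/*
 * Usage sketch (illustrative, not normative): a "buffer" instance is created
 * with an argument string matching what init_video() parses, i.e.
 * "width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param]", and an
 * "abuffer" instance with "time_base:sample_rate:sample_fmt:channel_layout".
 * Frames are then pushed with av_buffersrc_add_ref() (or av_buffersrc_add_frame()
 * when avcodec support is compiled in); passing a NULL reference signals EOF.
 */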

#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "asrc_abuffer.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avstring.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"

typedef struct {
    AVFifoBuffer    *fifo;
    AVRational       time_base;     ///< time_base to set in the output link
    int              eof;
    unsigned         nb_failed_requests;

    /* Video only */
    AVFilterContext *scale;
    int              h, w;
    enum PixelFormat pix_fmt;
    AVRational       sample_aspect_ratio;
    char             sws_param[256];

    /* Audio only */
    // Audio format of incoming buffers
    int              sample_rate;
    unsigned int     sample_format;
    int64_t          channel_layout;

    // Normalization filters
    AVFilterContext *aconvert;
    AVFilterContext *aresample;
} BufferSourceContext;

#define FIFO_SIZE 8

#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

static void buf_free(AVFilterBuffer *ptr)
{
    av_free(ptr);
    return;
}
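
/**
 * Detach @p link from its current source and attach it to the first output
 * pad of @p src, so that @p src feeds the rest of the graph directly.
 */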
static void set_link_source(AVFilterContext *src, AVFilterLink *link)
{
    link->src       = src;
    link->srcpad    = &(src->output_pads[0]);
    src->outputs[0] = link;
}
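
/**
 * Propagate the buffer source's current audio parameters to the input link of
 * an auto-inserted filter, then re-run its init and config_props callbacks.
 */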
static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx)
{
    int ret;
    AVFilterLink * const inlink  = filt_ctx->inputs[0];
    AVFilterLink * const outlink = filt_ctx->outputs[0];

    inlink->format         = abuffer->sample_format;
    inlink->channel_layout = abuffer->channel_layout;
    inlink->sample_rate    = abuffer->sample_rate;

    filt_ctx->filter->uninit(filt_ctx);
    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
    if ((ret = filt_ctx->filter->init(filt_ctx, NULL, NULL)) < 0)
        return ret;
    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
        return ret;
    return outlink->srcpad->config_props(outlink);
}
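
/**
 * Open and link a normalization filter (e.g. "aresample") between the buffer
 * source and the rest of the graph, then configure it for the current input
 * format. On failure the original link is restored.
 */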
static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
        return ret;

    link->src->outputs[0] = NULL;
    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
        link->src->outputs[0] = link;
        return ret;
    }

    set_link_source(*filt_ctx, link);

    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    return 0;
}
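
/**
 * Remove a previously auto-inserted filter and reconnect its output link
 * directly to the buffer source.
 */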
static void remove_filter(AVFilterContext **filt_ctx)
{
    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;

    (*filt_ctx)->outputs[0] = NULL;
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;

    set_link_source(src, outlink);
}

static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
{
    char old_layout_str[16], new_layout_str[16];

    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
                                 -1, link->channel_layout);
    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
                                 -1, ref->audio->channel_layout);
    av_log(ctx, AV_LOG_INFO,
           "Audio input format changed: "
           "%s:%s:%d -> %s:%s:%d, normalizing\n",
           av_get_sample_fmt_name(link->format),
           old_layout_str, (int)link->sample_rate,
           av_get_sample_fmt_name(ref->format),
           new_layout_str, ref->audio->sample_rate);
}
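
/**
 * Check an incoming video buffer against the configured frame properties; on
 * a size or pixel format change, insert (or reconfigure) a "scale" filter
 * after the source so downstream filters keep receiving the original format.
 */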
static int check_format_change_video(AVFilterContext *buffer_filter,
                                     AVFilterBufferRef *picref)
{
    BufferSourceContext *c = buffer_filter->priv;
    int ret;

    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
        AVFilterLink *link;
        char scale_param[1024];

        av_log(buffer_filter, AV_LOG_INFO,
               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);

        if (!scale || strcmp(scale->filter->name, "scale")) {
            AVFilter *f = avfilter_get_by_name("scale");

            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
                return ret;

            c->scale = scale;

            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
                return ret;
            }

            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
                return ret;
            }

            scale->outputs[0]->time_base = scale->inputs[0]->time_base;
            scale->outputs[0]->format    = c->pix_fmt;
        } else if (!strcmp(scale->filter->name, "scale")) {
            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
            scale->filter->init(scale, scale_param, NULL);
        }

        c->pix_fmt = scale->inputs[0]->format = picref->format;
        c->w       = scale->inputs[0]->w      = picref->video->w;
        c->h       = scale->inputs[0]->h      = picref->video->h;

        link = scale->outputs[0];
        if ((ret = link->srcpad->config_props(link)) < 0)
            return ret;
    }
    return 0;
}
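
/**
 * Check an incoming audio buffer against the output link's format; on a
 * change, insert, reconfigure or remove an "aresample" filter so that the
 * rest of the graph keeps receiving the negotiated format.
 */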
static int check_format_change_audio(AVFilterContext *ctx,
                                     AVFilterBufferRef *samplesref)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterLink *link;
    int ret, logged = 0;

    link = ctx->outputs[0];
    if (samplesref->audio->sample_rate != link->sample_rate ||
        samplesref->format != link->format ||
        samplesref->audio->channel_layout != link->channel_layout) {

        log_input_change(ctx, link, samplesref);
        logged = 1;

        abuffer->sample_rate    = samplesref->audio->sample_rate;
        abuffer->sample_format  = samplesref->format;
        abuffer->channel_layout = samplesref->audio->channel_layout;

        if (!abuffer->aresample) {
            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aresample->outputs[0];
            if (samplesref->audio->sample_rate == link->sample_rate &&
                samplesref->format == link->format &&
                samplesref->audio->channel_layout == link->channel_layout)
                remove_filter(&abuffer->aresample);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
                    return ret;
        }
    }
    return 0;
}

static int check_format_change(AVFilterContext *buffer_filter,
                               AVFilterBufferRef *picref)
{
    switch (buffer_filter->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        return check_format_change_video(buffer_filter, picref);
    case AVMEDIA_TYPE_AUDIO:
        return check_format_change_audio(buffer_filter, picref);
    default:
        return AVERROR(ENOSYS);
    }
}
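
/**
 * Duplicate the data of an incoming buffer reference into a buffer owned by
 * the output link, so the caller keeps ownership of the original.
 */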
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels, data_size, i;

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                        ref->video->w, ref->video->h);
        if (!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                  ref->audio->nb_samples);
        if (!buf)
            return NULL;
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}
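
/**
 * Queue a buffer reference on the source's FIFO. A NULL @p picref marks EOF;
 * AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT skips the format-change check and
 * AV_BUFFERSRC_FLAG_NO_COPY stores the reference without duplicating its data.
 */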
int av_buffersrc_add_ref(AVFilterContext *buffer_filter,
                         AVFilterBufferRef *picref, int flags)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterBufferRef *buf;
    int ret;

    if (!picref) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        ret = check_format_change(buffer_filter, picref);
        if (ret < 0)
            return ret;
    }
    if (flags & AV_BUFFERSRC_FLAG_NO_COPY)
        buf = picref;
    else
        buf = copy_buffer_ref(buffer_filter, picref);
    if (!buf)
        return -1;
    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        if (buf != picref)
            avfilter_unref_buffer(buf);
        return ret;
    }
    c->nb_failed_requests = 0;
    return 0;
}

int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    return av_buffersrc_add_ref(buffer_filter, picref, 0);
}

#if CONFIG_AVCODEC
#include "avcodec.h"

int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);
    return ret;
}

int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
                             const AVFrame *frame, int flags)
{
    return av_buffersrc_add_frame(buffer_src, frame, 0);
}
#endif

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

unsigned av_vsrc_buffer_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
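
/**
 * Parse the "buffer" argument string
 * (width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param])
 * and allocate the frame FIFO.
 */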
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int ret, n = 0;
    *c->sws_param = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
        return ret;

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den,
           c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param);
    return 0;
}
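
/**
 * Parse the "abuffer" argument string
 * (time_base:sample_rate:sample_fmt:channel_layout)
 * and allocate the sample FIFO.
 */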
static av_cold int init_audio(AVFilterContext *ctx, const char *args0, void *opaque)
{
    BufferSourceContext *abuffer = ctx->priv;
    char *arg = NULL, *ptr, chlayout_str[16];
    char *args = av_strdup(args0);
    int ret;

    arg = av_strtok(args, ":", &ptr);

#define ADD_FORMAT(fmt_name)                                            \
    if (!arg)                                                           \
        goto arg_fail;                                                  \
    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) { \
        av_freep(&args);                                                \
        return ret;                                                     \
    }                                                                   \
    if (*args)                                                          \
        arg = av_strtok(NULL, ":", &ptr)

    ADD_FORMAT(time_base);
    ADD_FORMAT(sample_rate);
    ADD_FORMAT(sample_format);
    ADD_FORMAT(channel_layout);

    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
    if (!abuffer->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
        return AVERROR(ENOMEM);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
                                 -1, abuffer->channel_layout);
    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
           abuffer->sample_rate);
    av_freep(&args);

    return 0;

arg_fail:
    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
                              "time_base:sample_rate:sample_fmt:channel_layout\n");
    av_freep(&args);
    return AVERROR(EINVAL);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;

    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    avfilter_free(s->scale);
    s->scale = NULL;
}

static int query_formats_video(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int query_formats_audio(AVFilterContext *ctx)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_format);
    avfilter_set_common_sample_formats(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_rate);
    ff_set_common_samplerates(ctx, formats);

    layouts = NULL;
    ff_add_channel_layout(&layouts, abuffer->channel_layout);
    ff_set_common_channel_layouts(ctx, layouts);

    return 0;
}

static int config_output_video(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    link->w = c->w;
    link->h = c->h;
    link->sample_aspect_ratio = c->sample_aspect_ratio;
    link->time_base = c->time_base;

    return 0;
}

static int config_output_audio(AVFilterLink *outlink)
{
    BufferSourceContext *abuffer = outlink->src->priv;

    outlink->sample_rate = abuffer->sample_rate;
    outlink->time_base   = abuffer->time_base;

    return 0;
}
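
/**
 * Pop the oldest queued buffer and push it downstream, either as a video
 * frame (start_frame/draw_slice/end_frame) or as an audio buffer
 * (ff_filter_samples). Returns AVERROR(EAGAIN) and bumps nb_failed_requests
 * when the FIFO is empty, or AVERROR_EOF once EOF has been signalled.
 */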
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
        avfilter_draw_slice(link, 0, link->h, 1);
        avfilter_end_frame(link);
        avfilter_unref_buffer(buf);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        avfilter_unref_buffer(buf);
        break;
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}
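
/**
 * Report how many buffers are currently queued, or AVERROR_EOF once the
 * FIFO has drained after EOF.
 */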
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);

    if (!size && c->eof)
        return AVERROR_EOF;
    return size/sizeof(AVFilterBufferRef*);
}

int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
                                        AVFilterBufferRef *samplesref,
                                        int av_unused flags)
{
    return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
}

int av_asrc_buffer_add_samples(AVFilterContext *ctx,
                               uint8_t *data[8], int linesize[8],
                               int nb_samples, int sample_rate,
                               int sample_fmt, int64_t channel_layout, int planar,
                               int64_t pts, int av_unused flags)
{
    AVFilterBufferRef *samplesref;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
                     data, linesize[0], AV_PERM_WRITE,
                     nb_samples,
                     sample_fmt, channel_layout);
    if (!samplesref)
        return AVERROR(ENOMEM);

    samplesref->buf->free          = buf_free;
    samplesref->pts                = pts;
    samplesref->audio->sample_rate = sample_rate;

    AV_NOWARN_DEPRECATED(
        return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
    )
}

int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
                              uint8_t *buf, int buf_size, int sample_rate,
                              int sample_fmt, int64_t channel_layout, int planar,
                              int64_t pts, int av_unused flags)
{
    uint8_t *data[8] = {0};
    int linesize[8];
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);

    av_samples_fill_arrays(data, linesize,
                           buf, nb_channels, nb_samples,
                           sample_fmt, 16);

    AV_NOWARN_DEPRECATED(
        return av_asrc_buffer_add_samples(ctx,
                                          data, linesize, nb_samples,
                                          sample_rate,
                                          sample_fmt, channel_layout, planar,
                                          pts, flags);
    )
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_video,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_VIDEO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_video, },
                                            { .name = NULL }},
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_audio,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_AUDIO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_audio, },
                                            { .name = NULL }},
};