/*
 * Copyright (c) 2008 Vitor Sessak
 * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 * Copyright (c) 2011 Mina Nagy Zaki
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */

#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "asrc_abuffer.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avstring.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"

typedef struct {
    AVFifoBuffer     *fifo;
    AVRational        time_base;     ///< time_base to set in the output link
    int               eof;
    unsigned          nb_failed_requests;

    /* Video only */
    AVFilterContext  *scale;
    int               h, w;
    enum PixelFormat  pix_fmt;
    AVRational        sample_aspect_ratio;
    char              sws_param[256];

    /* Audio only */
    // Audio format of incoming buffers
    int               sample_rate;
    unsigned int      sample_format;
    int64_t           channel_layout;

    // Normalization filters
    AVFilterContext  *aconvert;
    AVFilterContext  *aresample;
} BufferSourceContext;

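/* Initial number of buffer references the audio FIFO is allocated for. */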
#define FIFO_SIZE 8

#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

static void buf_free(AVFilterBuffer *ptr)
{
    av_free(ptr);
    return;
}

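/**
 * Make src the source of link: attach the link to the first output pad
 * of src and register it as that filter's first output.
 */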
static void set_link_source(AVFilterContext *src, AVFilterLink *link)
{
    link->src       = src;
    link->srcpad    = &(src->output_pads[0]);
    src->outputs[0] = link;
}

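/**
 * Push the audio format stored in the buffer source context onto the input
 * link of filt_ctx, then re-run the filter's init and the config_props
 * callbacks of its input and output links.
 */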
static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx)
{
    int ret;
    AVFilterLink * const inlink  = filt_ctx->inputs[0];
    AVFilterLink * const outlink = filt_ctx->outputs[0];

    inlink->format         = abuffer->sample_format;
    inlink->channel_layout = abuffer->channel_layout;
    inlink->sample_rate    = abuffer->sample_rate;

    filt_ctx->filter->uninit(filt_ctx);
    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
    if ((ret = filt_ctx->filter->init(filt_ctx, NULL, NULL)) < 0)
        return ret;
    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
        return ret;
    return outlink->srcpad->config_props(outlink);
}

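/**
 * Open the filter named filt_name, insert it between the buffer source and
 * link, and reconfigure it for the audio format stored in abuffer.
 */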
static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
        return ret;

    link->src->outputs[0] = NULL;
    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
        link->src->outputs[0] = link;
        return ret;
    }

    set_link_source(*filt_ctx, link);

    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    return 0;
}

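/**
 * Remove a previously inserted normalization filter and reattach its output
 * link directly to the buffer source.
 */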
static void remove_filter(AVFilterContext **filt_ctx)
{
    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;

    (*filt_ctx)->outputs[0] = NULL;
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;

    set_link_source(src, outlink);
}

static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
{
    char old_layout_str[16], new_layout_str[16];
    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
                                 -1, link->channel_layout);
    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
                                 -1, ref->audio->channel_layout);
    av_log(ctx, AV_LOG_INFO,
           "Audio input format changed: "
           "%s:%s:%d -> %s:%s:%d, normalizing\n",
           av_get_sample_fmt_name(link->format),
           old_layout_str, (int)link->sample_rate,
           av_get_sample_fmt_name(ref->format),
           new_layout_str, ref->audio->sample_rate);
}

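/**
 * Detect a change of size or pixel format in the incoming video buffer and
 * insert (or re-init) a scale filter after the buffer source so that frames
 * are converted back to the configured output size and format.
 */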
static int check_format_change_video(AVFilterContext *buffer_filter,
                                     AVFilterBufferRef *picref)
{
    BufferSourceContext *c = buffer_filter->priv;
    int ret;

    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
        AVFilterLink *link;
        char scale_param[1024];

        av_log(buffer_filter, AV_LOG_INFO,
               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);

        if (!scale || strcmp(scale->filter->name, "scale")) {
            AVFilter *f = avfilter_get_by_name("scale");

            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
                return ret;
            c->scale = scale;

            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
                return ret;
            }

            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
                return ret;
            }
            scale->outputs[0]->time_base = scale->inputs[0]->time_base;

            scale->outputs[0]->format = c->pix_fmt;
        } else if (!strcmp(scale->filter->name, "scale")) {
            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
            scale->filter->init(scale, scale_param, NULL);
        }

        c->pix_fmt = scale->inputs[0]->format = picref->format;
        c->w       = scale->inputs[0]->w      = picref->video->w;
        c->h       = scale->inputs[0]->h      = picref->video->h;

        link = scale->outputs[0];
        if ((ret = link->srcpad->config_props(link)) < 0)
            return ret;
    }
    return 0;
}

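/**
 * Detect a change of sample rate, sample format or channel layout in the
 * incoming audio buffer and insert, reconfigure or remove an aresample
 * filter to normalize the input back to the output link's format.
 */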
static int check_format_change_audio(AVFilterContext *ctx,
                                     AVFilterBufferRef *samplesref)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterLink *link;
    int ret, logged = 0;

    link = ctx->outputs[0];
    if (samplesref->audio->sample_rate    != link->sample_rate ||
        samplesref->format                != link->format      ||
        samplesref->audio->channel_layout != link->channel_layout) {

        log_input_change(ctx, link, samplesref);
        logged = 1;

        abuffer->sample_rate    = samplesref->audio->sample_rate;
        abuffer->sample_format  = samplesref->format;
        abuffer->channel_layout = samplesref->audio->channel_layout;

        if (!abuffer->aresample) {
            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aresample->outputs[0];
            if (samplesref->audio->sample_rate    == link->sample_rate &&
                samplesref->format                == link->format      &&
                samplesref->audio->channel_layout == link->channel_layout)
                remove_filter(&abuffer->aresample);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
                    return ret;
        }
    }
    return 0;
}

static int check_format_change(AVFilterContext *buffer_filter,
                               AVFilterBufferRef *picref)
{
    switch (buffer_filter->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        return check_format_change_video(buffer_filter, picref);
    case AVMEDIA_TYPE_AUDIO:
        return check_format_change_audio(buffer_filter, picref);
    default:
        return AVERROR(ENOSYS);
    }
}

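/**
 * Allocate a new buffer on the output link and copy the data and properties
 * of ref into it.
 */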
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels, data_size, i;

    switch (outlink->type) {

    case AVMEDIA_TYPE_VIDEO:
        buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                        ref->video->w, ref->video->h);
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                  ref->audio->nb_samples);
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        data_size = av_samples_get_buffer_size(NULL, channels,
                                               ref->audio->nb_samples,
                                               ref->format, 1);
        for (i = 0; i < FF_ARRAY_ELEMS(ref->buf->data) && ref->buf->data[i]; i++)
            memcpy(buf->buf->data[i], ref->buf->data[i], data_size);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}

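/**
 * Add a buffer reference to the source's FIFO; a NULL picref marks end of
 * stream.  Unless AV_BUFFERSRC_FLAG_NO_COPY is set the data is copied, and
 * unless AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT is set a format change triggers
 * insertion of a normalization filter.
 */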
int av_buffersrc_add_ref(AVFilterContext *buffer_filter,
                         AVFilterBufferRef *picref, int flags)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterBufferRef *buf;
    int ret;

    if (!picref) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        ret = check_format_change(buffer_filter, picref);
        if (ret < 0)
            return ret;
    }
    if (flags & AV_BUFFERSRC_FLAG_NO_COPY)
        buf = picref;
    else
        buf = copy_buffer_ref(buffer_filter, picref);

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        if (buf != picref)
            avfilter_unref_buffer(buf);
        return ret;
    }
    c->nb_failed_requests = 0;
    return 0;
}

int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    return av_buffersrc_add_ref(buffer_filter, picref, 0);
}

#if CONFIG_AVCODEC
#include "avcodec.h"

int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
                             const AVFrame *frame, int flags)
{
    return av_buffersrc_add_frame(buffer_src, frame, 0);
}
#endif

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

unsigned av_vsrc_buffer_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

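/**
 * Parse the video buffer source arguments:
 * width:height:pix_fmt:time_base.num:time_base.den:sar.num:sar.den[:sws_param]
 */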
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int ret, n = 0;
    *c->sws_param = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
        return ret;

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den,
           c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param);
    return 0;
}

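/**
 * Parse the audio buffer source arguments:
 * sample_rate:sample_fmt:channel_layout
 */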
static av_cold int init_audio(AVFilterContext *ctx, const char *args0, void *opaque)
{
    BufferSourceContext *abuffer = ctx->priv;
    char *arg = NULL, *ptr, chlayout_str[16];
    char *args = av_strdup(args0);
    int ret;

    arg = av_strtok(args, ":", &ptr);

#define ADD_FORMAT(fmt_name)                                            \
    if (!arg)                                                           \
        goto arg_fail;                                                  \
    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) {\
        av_freep(&args);                                                \
        return ret;                                                     \
    }                                                                   \
    if (*args)                                                          \
        arg = av_strtok(NULL, ":", &ptr)

    ADD_FORMAT(sample_rate);
    ADD_FORMAT(sample_format);
    ADD_FORMAT(channel_layout);

    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
    if (!abuffer->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
        return AVERROR(ENOMEM);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
                                 -1, abuffer->channel_layout);
    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
           abuffer->sample_rate);
    av_freep(&args);

    return 0;

arg_fail:
    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
                              "sample_rate:sample_fmt:channel_layout\n");
    av_freep(&args);
    return AVERROR(EINVAL);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    avfilter_free(s->scale);
    s->scale = NULL;
}

static int query_formats_video(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int query_formats_audio(AVFilterContext *ctx)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_format);
    avfilter_set_common_sample_formats(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_rate);
    ff_set_common_samplerates(ctx, formats);

    layouts = NULL;
    ff_add_channel_layout(&layouts, abuffer->channel_layout);
    ff_set_common_channel_layouts(ctx, layouts);

    return 0;
}

static int config_output_video(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    link->w = c->w;
    link->h = c->h;
    link->sample_aspect_ratio = c->sample_aspect_ratio;
    link->time_base = c->time_base;

    return 0;
}

static int config_output_audio(AVFilterLink *outlink)
{
    BufferSourceContext *abuffer = outlink->src->priv;
    outlink->sample_rate = abuffer->sample_rate;
    return 0;
}

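/**
 * Pop one buffer from the FIFO and push it down the output link; returns
 * AVERROR_EOF once the queue is drained after end of stream, or
 * AVERROR(EAGAIN) if no buffer is available yet.
 */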
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
        avfilter_draw_slice(link, 0, link->h, 1);
        avfilter_end_frame(link);
        avfilter_unref_buffer(buf);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        avfilter_unref_buffer(buf);
        break;
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}

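/**
 * Return the number of queued buffers, or AVERROR_EOF if the queue is empty
 * and end of stream was signalled.
 */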
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size/sizeof(AVFilterBufferRef*);
}

int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
                                        AVFilterBufferRef *samplesref,
                                        int av_unused flags)
{
    return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
}

int av_asrc_buffer_add_samples(AVFilterContext *ctx,
                               uint8_t *data[8], int linesize[8],
                               int nb_samples, int sample_rate,
                               int sample_fmt, int64_t channel_layout, int planar,
                               int64_t pts, int av_unused flags)
{
    AVFilterBufferRef *samplesref;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
                     data, linesize[0], AV_PERM_WRITE,
                     nb_samples,
                     sample_fmt, channel_layout);
    if (!samplesref)
        return AVERROR(ENOMEM);

    samplesref->buf->free  = buf_free;
    samplesref->pts = pts;
    samplesref->audio->sample_rate = sample_rate;

    AV_NOWARN_DEPRECATED(
    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
    )
}

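/**
 * Fill per-channel data pointers from a flat buffer with
 * av_samples_fill_arrays() and queue the samples through
 * av_asrc_buffer_add_samples().
 */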
int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
                              uint8_t *buf, int buf_size, int sample_rate,
                              int sample_fmt, int64_t channel_layout, int planar,
                              int64_t pts, int av_unused flags)
{
    uint8_t *data[8] = {0};
    int linesize[8];
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);

    av_samples_fill_arrays(data, linesize,
                           buf, nb_channels, nb_samples,
                           sample_fmt, 16);

    AV_NOWARN_DEPRECATED(
    return av_asrc_buffer_add_samples(ctx,
                                      data, linesize, nb_samples,
                                      sample_rate,
                                      sample_fmt, channel_layout, planar,
                                      pts, flags);
    )
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_video,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_VIDEO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_video, },
                                            { .name = NULL}},
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_audio,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_AUDIO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_audio, },
                                            { .name = NULL}},
};