/*
 * Copyright (c) 2008 Vitor Sessak
 * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 * Copyright (c) 2011 Mina Nagy Zaki
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
#include "avfilter.h"
#include "internal.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "asrc_abuffer.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avstring.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
typedef struct {
    AVFifoBuffer     *fifo;
    AVRational        time_base;     ///< time_base to set in the output link
    int               eof;
    unsigned          nb_failed_requests;

    /* Video only */
    AVFilterContext  *scale;
    int               h, w;
    enum PixelFormat  pix_fmt;
    AVRational        sample_aspect_ratio;
    char              sws_param[256];

    /* Audio only */
    // Audio format of incoming buffers
    int               sample_rate;
    unsigned int      sample_format;
    int64_t           channel_layout;
    int               packing_format;

    // Normalization filters
    AVFilterContext  *aconvert;
    AVFilterContext  *aresample;
} BufferSourceContext;
#define FIFO_SIZE 8

#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }
static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name);
static void remove_filter(AVFilterContext **filt_ctx);
static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx);
static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
{
    char old_layout_str[16], new_layout_str[16];

    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
                                 -1, link->channel_layout);
    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
                                 -1, ref->audio->channel_layout);
    av_log(ctx, AV_LOG_INFO,
           "Audio input format changed: "
           "%s:%s:%d -> %s:%s:%d, normalizing\n",
           av_get_sample_fmt_name(link->format),
           old_layout_str, (int)link->sample_rate,
           av_get_sample_fmt_name(ref->format),
           new_layout_str, ref->audio->sample_rate);
}
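
/**
 * Handle a change of frame size or pixel format on the incoming video
 * buffers: log the change, then insert (or re-initialize) a "scale" filter
 * on the output link so that downstream filters keep receiving frames with
 * the originally configured size and pixel format.
 */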
static int check_format_change_video(AVFilterContext *buffer_filter,
                                     AVFilterBufferRef *picref)
{
    BufferSourceContext *c = buffer_filter->priv;
    int ret;

    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
        AVFilterLink *link;
        char scale_param[1024];

        av_log(buffer_filter, AV_LOG_INFO,
               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);

        if (!scale || strcmp(scale->filter->name, "scale")) {
            AVFilter *f = avfilter_get_by_name("scale");

            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
                return ret;
            c->scale = scale;

            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
                return ret;
            }

            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
                return ret;
            }
            scale->outputs[0]->time_base = scale->inputs[0]->time_base;
            scale->outputs[0]->format    = c->pix_fmt;
        } else if (!strcmp(scale->filter->name, "scale")) {
            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
            scale->filter->init(scale, scale_param, NULL);
        }

        c->pix_fmt = scale->inputs[0]->format = picref->format;
        c->w       = scale->inputs[0]->w      = picref->video->w;
        c->h       = scale->inputs[0]->h      = picref->video->h;

        link = scale->outputs[0];
        if ((ret = link->srcpad->config_props(link)) < 0)
            return ret;
    }
    return 0;
}
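
/**
 * Handle a change of sample rate, sample format, channel layout or packing
 * on the incoming audio buffers: log the change, then insert, reconfigure or
 * remove "aresample" and "aconvert" filters on the output link so the stream
 * is normalized back to the negotiated output format.
 */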
static int check_format_change_audio(AVFilterContext *ctx,
                                     AVFilterBufferRef *samplesref)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterLink *link;
    int ret, logged = 0;

    link = ctx->outputs[0];
    if (samplesref->audio->sample_rate != link->sample_rate) {
        log_input_change(ctx, link, samplesref);
        logged = 1;

        abuffer->sample_rate = samplesref->audio->sample_rate;

        if (!abuffer->aresample) {
            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aresample->outputs[0];
            if (samplesref->audio->sample_rate == link->sample_rate)
                remove_filter(&abuffer->aresample);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
                    return ret;
        }
    }

    link = ctx->outputs[0];
    if (samplesref->format                != link->format         ||
        samplesref->audio->channel_layout != link->channel_layout ||
        samplesref->audio->planar         != link->planar) {

        if (!logged) log_input_change(ctx, link, samplesref);

        abuffer->sample_format  = samplesref->format;
        abuffer->channel_layout = samplesref->audio->channel_layout;
        abuffer->packing_format = samplesref->audio->planar;

        if (!abuffer->aconvert) {
            ret = insert_filter(abuffer, link, &abuffer->aconvert, "aconvert");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aconvert->outputs[0];
            if (samplesref->format                == link->format         &&
                samplesref->audio->channel_layout == link->channel_layout &&
                samplesref->audio->planar         == link->planar)
                remove_filter(&abuffer->aconvert);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aconvert)) < 0)
                    return ret;
        }
    }

    return 0;
}
static int check_format_change(AVFilterContext *buffer_filter,
                               AVFilterBufferRef *picref)
{
    switch (buffer_filter->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        return check_format_change_video(buffer_filter, picref);
    case AVMEDIA_TYPE_AUDIO:
        return check_format_change_audio(buffer_filter, picref);
    default:
        return AVERROR(ENOSYS);
    }
}
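
/**
 * Make a writable copy of the incoming buffer reference in a buffer
 * allocated on the output link (video only; other media types return NULL).
 */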
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                        ref->video->w, ref->video->h);
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    default:
        return NULL;
    }

    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}
int av_buffersrc_add_ref(AVFilterContext *buffer_filter,
                         AVFilterBufferRef *picref, int flags)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterBufferRef *buf;
    int ret;

    if (!picref) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        ret = check_format_change(buffer_filter, picref);
        if (ret < 0)
            return ret;
    }
    if (flags & AV_BUFFERSRC_FLAG_NO_COPY)
        buf = picref;
    else
        buf = copy_buffer_ref(buffer_filter, picref);

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        if (buf != picref)
            avfilter_unref_buffer(buf);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}
int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    return av_buffersrc_add_ref(buffer_filter, picref, 0);
}

int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
                                        AV_BUFFERSRC_FLAG_NO_COPY);
}
#if CONFIG_AVCODEC
#include "avcodec.h"

int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
                             const AVFrame *frame, int flags)
{
    BufferSourceContext *c = buffer_src->priv;
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_vsrc_buffer_add_video_buffer_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}
#endif
unsigned av_vsrc_buffer_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
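
/**
 * Initialize the video buffer source from an argument string of the form
 * "width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param]".
 */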
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int ret, n = 0;
    *c->sws_param = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
        return ret;

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den,
           c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param);
    return 0;
}
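
/**
 * Initialize the audio buffer source from an argument string of the form
 * "sample_rate:sample_fmt:channel_layout:packing".
 */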
static av_cold int init_audio(AVFilterContext *ctx, const char *args0, void *opaque)
{
    BufferSourceContext *abuffer = ctx->priv;
    char *arg = NULL, *ptr, chlayout_str[16];
    char *args = av_strdup(args0);
    int ret;

    arg = av_strtok(args, ":", &ptr);

#define ADD_FORMAT(fmt_name)                                            \
    if (!arg)                                                           \
        goto arg_fail;                                                  \
    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) {\
        av_freep(&args);                                                \
        return ret;                                                     \
    }                                                                   \
    if (*args)                                                          \
        arg = av_strtok(NULL, ":", &ptr)

    ADD_FORMAT(sample_rate);
    ADD_FORMAT(sample_format);
    ADD_FORMAT(channel_layout);
    ADD_FORMAT(packing_format);

    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
    if (!abuffer->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
        return AVERROR(ENOMEM);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
                                 -1, abuffer->channel_layout);
    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
           abuffer->sample_rate);
    av_freep(&args);

    return 0;

arg_fail:
    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
                              "sample_rate:sample_fmt:channel_layout:packing\n");
    av_freep(&args);
    return AVERROR(EINVAL);
}
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;

    avfilter_free(s->scale);
    s->scale = NULL;
}
static int query_formats_video(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
static int query_formats_audio(AVFilterContext *ctx)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterFormats *formats;

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_format);
    avfilter_set_common_sample_formats(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->channel_layout);
    avfilter_set_common_channel_layouts(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->packing_format);
    avfilter_set_common_packing_formats(ctx, formats);

    return 0;
}
static int config_output_video(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    link->w = c->w;
    link->h = c->h;
    link->sample_aspect_ratio = c->sample_aspect_ratio;
    link->time_base = c->time_base;

    return 0;
}

static int config_output_audio(AVFilterLink *outlink)
{
    BufferSourceContext *abuffer = outlink->src->priv;
    outlink->sample_rate = abuffer->sample_rate;
    return 0;
}
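
/**
 * Pop one buffer reference from the FIFO and push it to the output link,
 * either as a video frame or as an audio samples buffer.
 */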
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
        avfilter_draw_slice(link, 0, link->h, 1);
        avfilter_end_frame(link);
        avfilter_unref_buffer(buf);
        break;
    case AVMEDIA_TYPE_AUDIO:
        avfilter_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        avfilter_unref_buffer(buf);
        break;
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}
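
/** Return the number of buffer references currently queued in the FIFO. */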
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size/sizeof(AVFilterBufferRef*);
}
static void buf_free(AVFilterBuffer *ptr)
{
    av_free(ptr);
    return;
}
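
/** Make 'src' the source filter of 'link', rewiring its first output pad. */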
static void set_link_source(AVFilterContext *src, AVFilterLink *link)
{
    link->src       = src;
    link->srcpad    = &(src->output_pads[0]);
    src->outputs[0] = link;
}
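
/**
 * Propagate the buffer source's current audio parameters to the input link
 * of 'filt_ctx', then re-initialize the filter and reconfigure its links.
 */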
static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx)
{
    int ret;
    AVFilterLink * const inlink  = filt_ctx->inputs[0];
    AVFilterLink * const outlink = filt_ctx->outputs[0];

    inlink->format         = abuffer->sample_format;
    inlink->channel_layout = abuffer->channel_layout;
    inlink->planar         = abuffer->packing_format;
    inlink->sample_rate    = abuffer->sample_rate;

    filt_ctx->filter->uninit(filt_ctx);
    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
    if ((ret = filt_ctx->filter->init(filt_ctx, NULL, NULL)) < 0)
        return ret;
    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
        return ret;
    return outlink->srcpad->config_props(outlink);
}
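
/**
 * Open the filter named 'filt_name' and splice it between the buffer source
 * and the rest of the graph on 'link'.
 */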
static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
        return ret;

    link->src->outputs[0] = NULL;
    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
        link->src->outputs[0] = link;
        return ret;
    }

    set_link_source(*filt_ctx, link);

    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    return 0;
}
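
/**
 * Remove a previously inserted normalization filter and reconnect its
 * output link directly to the buffer source.
 */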
static void remove_filter(AVFilterContext **filt_ctx)
{
    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;

    (*filt_ctx)->outputs[0] = NULL;
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;

    set_link_source(src, outlink);
}
int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
                                        AVFilterBufferRef *samplesref,
                                        int av_unused flags)
{
    return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
}
int av_asrc_buffer_add_samples(AVFilterContext *ctx,
                               uint8_t *data[8], int linesize[8],
                               int nb_samples, int sample_rate,
                               int sample_fmt, int64_t channel_layout, int planar,
                               int64_t pts, int av_unused flags)
{
    AVFilterBufferRef *samplesref;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
                     data, linesize, AV_PERM_WRITE,
                     nb_samples,
                     sample_fmt, channel_layout, planar);
    if (!samplesref)
        return AVERROR(ENOMEM);

    samplesref->buf->free          = buf_free;
    samplesref->pts                = pts;
    samplesref->audio->sample_rate = sample_rate;

    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
}
int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
                              uint8_t *buf, int buf_size, int sample_rate,
                              int sample_fmt, int64_t channel_layout, int planar,
                              int64_t pts, int av_unused flags)
{
    uint8_t *data[8] = {0};
    int linesize[8];
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);

    av_samples_fill_arrays(data, linesize,
                           buf, nb_channels, nb_samples,
                           sample_fmt, 16);

    return av_asrc_buffer_add_samples(ctx,
                                      data, linesize, nb_samples,
                                      sample_rate,
                                      sample_fmt, channel_layout, planar,
                                      pts, flags);
}
AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_video,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_VIDEO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_video, },
                                            { .name = NULL}},
};
#if CONFIG_ABUFFER_FILTER
AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_audio,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_AUDIO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_audio, },
                                            { .name = NULL}},
};
#endif