/*
 * Copyright (c) 2008 Vitor Sessak
 * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 * Copyright (c) 2011 Mina Nagy Zaki
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */

#include "avfilter.h"
#include "internal.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "asrc_abuffer.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avstring.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"

typedef struct {
    AVFifoBuffer *fifo;
    AVRational    time_base;          ///< time_base to set in the output link
    int           eof;
    unsigned      nb_failed_requests;

    /* Video only */
    AVFilterContext *scale;
    int           h, w;
    enum PixelFormat pix_fmt;
    AVRational    sample_aspect_ratio;
    char          sws_param[256];

    /* Audio only */
    // Audio format of incoming buffers
    int           sample_rate;
    unsigned int  sample_format;
    int64_t       channel_layout;
    int           packing_format;

    // Normalization filters
    AVFilterContext *aconvert;
    AVFilterContext *aresample;
} BufferSourceContext;

#define FIFO_SIZE 8

#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

static void buf_free(AVFilterBuffer *ptr)
{
    av_free(ptr);
    return;
}

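/**
 * Make @p src the source of @p link, attaching the link to the filter's
 * first output pad. Used when (re)wiring the inserted normalization filters.
 */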
static void set_link_source(AVFilterContext *src, AVFilterLink *link)
{
    link->src       = src;
    link->srcpad    = &(src->output_pads[0]);
    src->outputs[0] = link;
}

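/**
 * Reinitialize an inserted normalization filter (aresample/aconvert) so that
 * it picks up the audio parameters currently stored in the buffer source
 * context, then reconfigure its input and output links.
 */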
static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx)
{
    int ret;
    AVFilterLink * const inlink  = filt_ctx->inputs[0];
    AVFilterLink * const outlink = filt_ctx->outputs[0];

    inlink->format         = abuffer->sample_format;
    inlink->channel_layout = abuffer->channel_layout;
    inlink->planar         = abuffer->packing_format;
    inlink->sample_rate    = abuffer->sample_rate;

    filt_ctx->filter->uninit(filt_ctx);
    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
    if ((ret = filt_ctx->filter->init(filt_ctx, NULL, NULL)) < 0)
        return ret;
    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
        return ret;
    return outlink->srcpad->config_props(outlink);
}

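/**
 * Open the filter named @p filt_name, link it between the buffer source and
 * @p link, and configure it for the current audio parameters. On failure the
 * original connection is restored and the new filter is freed.
 */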
static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
        return ret;

    link->src->outputs[0] = NULL;
    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
        link->src->outputs[0] = link;
        return ret;
    }

    set_link_source(*filt_ctx, link);

    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    return 0;
}

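/**
 * Remove a previously inserted normalization filter and reconnect its output
 * link directly to the buffer source.
 */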
static void remove_filter(AVFilterContext **filt_ctx)
{
    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;

    (*filt_ctx)->outputs[0] = NULL;
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;

    set_link_source(src, outlink);
}

static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
{
    char old_layout_str[16], new_layout_str[16];

    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
                                 -1, link->channel_layout);
    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
                                 -1, ref->audio->channel_layout);
    av_log(ctx, AV_LOG_INFO,
           "Audio input format changed: "
           "%s:%s:%d -> %s:%s:%d, normalizing\n",
           av_get_sample_fmt_name(link->format),
           old_layout_str, (int)link->sample_rate,
           av_get_sample_fmt_name(ref->format),
           new_layout_str, ref->audio->sample_rate);
}

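/**
 * Detect a change of frame size or pixel format in the incoming buffer and
 * insert or reconfigure a scale filter to normalize the output.
 */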
static int check_format_change_video(AVFilterContext *buffer_filter,
                                     AVFilterBufferRef *picref)
{
    BufferSourceContext *c = buffer_filter->priv;
    int ret;

    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
        AVFilterLink *link;
        char scale_param[1024];

        av_log(buffer_filter, AV_LOG_INFO,
               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);

        if (!scale || strcmp(scale->filter->name, "scale")) {
            AVFilter *f = avfilter_get_by_name("scale");

            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
                return ret;

            c->scale = scale;

            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
                return ret;
            }

            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
                return ret;
            }

            scale->outputs[0]->time_base = scale->inputs[0]->time_base;
            scale->outputs[0]->format    = c->pix_fmt;
        } else if (!strcmp(scale->filter->name, "scale")) {
            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
            scale->filter->init(scale, scale_param, NULL);
        }

        c->pix_fmt = scale->inputs[0]->format = picref->format;
        c->w       = scale->inputs[0]->w      = picref->video->w;
        c->h       = scale->inputs[0]->h      = picref->video->h;

        link = scale->outputs[0];
        if ((ret = link->srcpad->config_props(link)) < 0)
            return ret;
    }
    return 0;
}

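/**
 * Detect a change of sample rate, sample format, channel layout or packing in
 * the incoming buffer and insert, reconfigure or remove the aresample and
 * aconvert normalization filters accordingly.
 */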
static int check_format_change_audio(AVFilterContext *ctx,
                                     AVFilterBufferRef *samplesref)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterLink *link;
    int ret, logged = 0;

    link = ctx->outputs[0];
    if (samplesref->audio->sample_rate != link->sample_rate) {

        log_input_change(ctx, link, samplesref);
        logged = 1;

        abuffer->sample_rate = samplesref->audio->sample_rate;

        if (!abuffer->aresample) {
            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aresample->outputs[0];
            if (samplesref->audio->sample_rate == link->sample_rate)
                remove_filter(&abuffer->aresample);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
                    return ret;
        }
    }

    link = ctx->outputs[0];
    if (samplesref->format                != link->format         ||
        samplesref->audio->channel_layout != link->channel_layout ||
        samplesref->audio->planar         != link->planar) {

        if (!logged) log_input_change(ctx, link, samplesref);

        abuffer->sample_format  = samplesref->format;
        abuffer->channel_layout = samplesref->audio->channel_layout;
        abuffer->packing_format = samplesref->audio->planar;

        if (!abuffer->aconvert) {
            ret = insert_filter(abuffer, link, &abuffer->aconvert, "aconvert");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aconvert->outputs[0];
            if (samplesref->format                == link->format         &&
                samplesref->audio->channel_layout == link->channel_layout &&
                samplesref->audio->planar         == link->planar)
                remove_filter(&abuffer->aconvert);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aconvert)) < 0)
                    return ret;
        }
    }

    return 0;
}

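/**
 * Dispatch format-change handling to the video or audio variant depending on
 * the media type of the output link.
 */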
static int check_format_change(AVFilterContext *buffer_filter,
                               AVFilterBufferRef *picref)
{
    switch (buffer_filter->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        return check_format_change_video(buffer_filter, picref);
    case AVMEDIA_TYPE_AUDIO:
        return check_format_change_audio(buffer_filter, picref);
    default:
        return AVERROR(ENOSYS);
    }
}

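/**
 * Return a writable copy of @p ref allocated on the output link, so the
 * caller keeps ownership of the original buffer.
 */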
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels, data_size, i;

    switch (outlink->type) {

    case AVMEDIA_TYPE_VIDEO:
        buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                        ref->video->w, ref->video->h);
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE,
                                        ref->audio->nb_samples);
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        data_size = av_samples_get_buffer_size(NULL, channels,
                                               ref->audio->nb_samples,
                                               ref->format, 1);
        for (i = 0; i < FF_ARRAY_ELEMS(ref->buf->data) && ref->buf->data[i]; i++)
            memcpy(buf->buf->data[i], ref->buf->data[i], data_size);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}

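/**
 * Add a buffer reference to the FIFO of the buffer source; a NULL reference
 * marks EOF. Unless AV_BUFFERSRC_FLAG_NO_COPY is set, the data is copied.
 */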
int av_buffersrc_add_ref(AVFilterContext *buffer_filter,
                         AVFilterBufferRef *picref, int flags)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterBufferRef *buf;
    int ret;

    if (!picref) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        ret = check_format_change(buffer_filter, picref);
        if (ret < 0)
            return ret;
    }
    if (flags & AV_BUFFERSRC_FLAG_NO_COPY)
        buf = picref;
    else
        buf = copy_buffer_ref(buffer_filter, picref);

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        if (buf != picref)
            avfilter_unref_buffer(buf);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}

int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    return av_buffersrc_add_ref(buffer_filter, picref, 0);
}

#if CONFIG_AVCODEC
#include "avcodec.h"

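/**
 * Wrap an AVFrame in a buffer reference and feed it to the buffer source;
 * a NULL frame is interpreted as EOF.
 */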
int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
                             const AVFrame *frame, int flags)
{
    return av_buffersrc_add_frame(buffer_src, frame, 0);
}
#endif

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

unsigned av_vsrc_buffer_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int ret, n = 0;
    *c->sws_param = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
        return ret;

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den,
           c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param);
    return 0;
}

static av_cold int init_audio(AVFilterContext *ctx, const char *args0, void *opaque)
{
    BufferSourceContext *abuffer = ctx->priv;
    char *arg = NULL, *ptr, chlayout_str[16];
    char *args = av_strdup(args0);
    int ret;

    arg = av_strtok(args, ":", &ptr);

#define ADD_FORMAT(fmt_name)                                            \
    if (!arg)                                                           \
        goto arg_fail;                                                  \
    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) { \
        av_freep(&args);                                                \
        return ret;                                                     \
    }                                                                   \
    if (*args)                                                          \
        arg = av_strtok(NULL, ":", &ptr)

    ADD_FORMAT(sample_rate);
    ADD_FORMAT(sample_format);
    ADD_FORMAT(channel_layout);
    ADD_FORMAT(packing_format);

    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
    if (!abuffer->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
        return AVERROR(ENOMEM);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
                                 -1, abuffer->channel_layout);
    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
           abuffer->sample_rate);
    av_freep(&args);

    return 0;

arg_fail:
    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
                              "sample_rate:sample_fmt:channel_layout:packing\n");
    av_freep(&args);
    return AVERROR(EINVAL);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    avfilter_free(s->scale);
    s->scale = NULL;
}

static int query_formats_video(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int query_formats_audio(AVFilterContext *ctx)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterFormats *formats;

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_format);
    avfilter_set_common_sample_formats(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->channel_layout);
    avfilter_set_common_channel_layouts(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->packing_format);
    avfilter_set_common_packing_formats(ctx, formats);

    return 0;
}

static int config_output_video(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    link->w = c->w;
    link->h = c->h;
    link->sample_aspect_ratio = c->sample_aspect_ratio;
    link->time_base = c->time_base;

    return 0;
}

static int config_output_audio(AVFilterLink *outlink)
{
    BufferSourceContext *abuffer = outlink->src->priv;

    outlink->sample_rate = abuffer->sample_rate;

    return 0;
}

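/**
 * Pop one buffer from the FIFO and push it down the output link, either as a
 * video frame or as audio samples; return EAGAIN if the FIFO is empty and
 * EOF has not been reached.
 */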
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
        avfilter_draw_slice(link, 0, link->h, 1);
        avfilter_end_frame(link);
        avfilter_unref_buffer(buf);
        break;
    case AVMEDIA_TYPE_AUDIO:
        avfilter_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        avfilter_unref_buffer(buf);
        break;
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}

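/**
 * Return the number of buffered frames available, or AVERROR_EOF once the
 * FIFO is empty and EOF has been signalled.
 */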
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size/sizeof(AVFilterBufferRef*);
}

int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
                                        AVFilterBufferRef *samplesref,
                                        int av_unused flags)
{
    return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
}

int av_asrc_buffer_add_samples(AVFilterContext *ctx,
                               uint8_t *data[8], int linesize[8],
                               int nb_samples, int sample_rate,
                               int sample_fmt, int64_t channel_layout, int planar,
                               int64_t pts, int av_unused flags)
{
    AVFilterBufferRef *samplesref;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
                     data, linesize, AV_PERM_WRITE,
                     nb_samples,
                     sample_fmt, channel_layout, planar);
    if (!samplesref)
        return AVERROR(ENOMEM);

    samplesref->buf->free = buf_free;
    samplesref->pts = pts;
    samplesref->audio->sample_rate = sample_rate;

    AV_NOWARN_DEPRECATED(
    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
    )
}

int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
                              uint8_t *buf, int buf_size, int sample_rate,
                              int sample_fmt, int64_t channel_layout, int planar,
                              int64_t pts, int av_unused flags)
{
    uint8_t *data[8] = {0};
    int linesize[8];
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);

    av_samples_fill_arrays(data, linesize,
                           buf, nb_channels, nb_samples,
                           sample_fmt, 16);

    AV_NOWARN_DEPRECATED(
    return av_asrc_buffer_add_samples(ctx,
                                      data, linesize, nb_samples,
                                      sample_rate,
                                      sample_fmt, channel_layout, planar,
                                      pts, flags);
    )
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_video,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_VIDEO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_video, },
                                            { .name = NULL}},
};

#if CONFIG_ABUFFER_FILTER

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_audio,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_AUDIO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_audio, },
                                            { .name = NULL}},
};

#endif