/*
 * Copyright (c) 2008 Vitor Sessak
 * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 * Copyright (c) 2011 Mina Nagy Zaki
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
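
/*
 * Minimal usage sketch (illustrative only; buffersrc_ctx is assumed to be
 * the AVFilterContext of an already configured "buffer" source instance,
 * and error handling is omitted):
 *
 * @code
 * // push each decoded frame into the source
 * // (av_vsrc_buffer_add_frame() is available when built with libavcodec)
 * av_vsrc_buffer_add_frame(buffersrc_ctx, frame, 0);
 * // ...and signal end of stream with a NULL frame
 * av_vsrc_buffer_add_frame(buffersrc_ctx, NULL, 0);
 * @endcode
 */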

#include "avfilter.h"
#include "internal.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "vsrc_buffer.h"
#include "asrc_abuffer.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avstring.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"

typedef struct {
    AVFifoBuffer     *fifo;
    AVRational        time_base;     ///< time_base to set in the output link
    int               eof;
    unsigned          nb_failed_requests;

    /* Video only */
    AVFilterContext  *scale;
    int               h, w;
    enum PixelFormat  pix_fmt;
    AVRational        sample_aspect_ratio;
    char              sws_param[256];

    /* Audio only */
    // Audio format of incoming buffers
    int sample_rate;
    unsigned int sample_format;
    int64_t channel_layout;
    int packing_format;

    // Normalization filters
    AVFilterContext *aconvert;
    AVFilterContext *aresample;
} BufferSourceContext;

#define FIFO_SIZE 8

#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }
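
/**
 * Add a video buffer reference to the "buffer" source.
 *
 * If the picture's size or pixel format differs from the values the source
 * was configured with, a "scale" filter is inserted (or reconfigured) right
 * after the source so that downstream filters keep seeing the original
 * parameters.  A NULL picref marks end of stream; adding a buffer after EOF
 * fails with AVERROR(EINVAL).
 */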
int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterLink *outlink = buffer_filter->outputs[0];
    AVFilterBufferRef *buf;
    int ret;

    if (!picref) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
        AVFilterLink *link;
        char scale_param[1024];

        av_log(buffer_filter, AV_LOG_INFO,
               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);

        if (!scale || strcmp(scale->filter->name, "scale")) {
            AVFilter *f = avfilter_get_by_name("scale");

            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
                return ret;

            c->scale = scale;

            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
                return ret;
            }

            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
                return ret;
            }
            scale->outputs[0]->time_base = scale->inputs[0]->time_base;

            scale->outputs[0]->format = c->pix_fmt;
        } else if (!strcmp(scale->filter->name, "scale")) {
            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
            scale->filter->init(scale, scale_param, NULL);
        }

        c->pix_fmt = scale->inputs[0]->format = picref->format;
        c->w       = scale->inputs[0]->w      = picref->video->w;
        c->h       = scale->inputs[0]->h      = picref->video->h;

        link = scale->outputs[0];
        if ((ret = link->srcpad->config_props(link)) < 0)
            return ret;
    }

    buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                    picref->video->w, picref->video->h);
    av_image_copy(buf->data, buf->linesize,
                  (void*)picref->data, picref->linesize,
                  picref->format, picref->video->w, picref->video->h);
    avfilter_copy_buffer_ref_props(buf, picref);

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(buf);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}
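
/**
 * Queue an existing buffer reference in the source FIFO without copying
 * the data.  A NULL buf marks end of stream.
 */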
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    BufferSourceContext *c = s->priv;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

//  CHECK_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0)
        return ret;

    c->nb_failed_requests = 0;

    return 0;
}

#if CONFIG_AVCODEC
#include "avcodec.h"

int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
                             const AVFrame *frame, int flags)
{
    BufferSourceContext *c = buffer_src->priv;
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_vsrc_buffer_add_video_buffer_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}
#endif
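
/**
 * Return the number of request_frame() calls that failed because the FIFO
 * was empty, counted since the last time a buffer was successfully added.
 */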
unsigned av_vsrc_buffer_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
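
/**
 * Parse the video source arguments, expected in the form
 * width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param]
 * (at least the first seven fields are required), and allocate the FIFO.
 */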
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int ret, n = 0;
    *c->sws_param = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
        return ret;

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den,
           c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param);
    return 0;
}
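
/**
 * Parse the audio source arguments, expected in the form
 * sample_rate:sample_fmt:channel_layout:packing, and allocate the FIFO.
 */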
static av_cold int init_audio(AVFilterContext *ctx, const char *args0, void *opaque)
{
    BufferSourceContext *abuffer = ctx->priv;
    char *arg = NULL, *ptr, chlayout_str[16];
    char *args = av_strdup(args0);
    int ret;

    arg = av_strtok(args, ":", &ptr);

#define ADD_FORMAT(fmt_name)                                            \
    if (!arg)                                                           \
        goto arg_fail;                                                  \
    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) {\
        av_freep(&args);                                                \
        return ret;                                                     \
    }                                                                   \
    if (*args)                                                          \
        arg = av_strtok(NULL, ":", &ptr)

    ADD_FORMAT(sample_rate);
    ADD_FORMAT(sample_format);
    ADD_FORMAT(channel_layout);
    ADD_FORMAT(packing_format);

    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
    if (!abuffer->fifo) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
        return AVERROR(ENOMEM);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
                                 -1, abuffer->channel_layout);
    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
           abuffer->sample_rate);
    av_freep(&args);

    return 0;

arg_fail:
    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
                              "sample_rate:sample_fmt:channel_layout:packing\n");
    av_freep(&args);
    return AVERROR(EINVAL);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    avfilter_free(s->scale);
    s->scale = NULL;
}

static int query_formats_video(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int query_formats_audio(AVFilterContext *ctx)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterFormats *formats;

    formats = NULL;
    avfilter_add_format(&formats, abuffer->sample_format);
    avfilter_set_common_sample_formats(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->channel_layout);
    avfilter_set_common_channel_layouts(ctx, formats);

    formats = NULL;
    avfilter_add_format(&formats, abuffer->packing_format);
    avfilter_set_common_packing_formats(ctx, formats);

    return 0;
}

static int config_output_video(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    link->w = c->w;
    link->h = c->h;
    link->sample_aspect_ratio = c->sample_aspect_ratio;
    link->time_base = c->time_base;

    return 0;
}

static int config_output_audio(AVFilterLink *outlink)
{
    BufferSourceContext *abuffer = outlink->src->priv;
    outlink->sample_rate = abuffer->sample_rate;
    return 0;
}
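
/**
 * Pop one buffer from the FIFO and push it to the output link.  Returns
 * AVERROR(EAGAIN) (and counts a failed request) when the FIFO is empty, or
 * AVERROR_EOF once the FIFO is drained after end of stream was signalled.
 */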
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        /* TODO reindent */
        avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
        avfilter_draw_slice(link, 0, link->h, 1);
        avfilter_end_frame(link);
        avfilter_unref_buffer(buf);
        break;
    case AVMEDIA_TYPE_AUDIO:
        avfilter_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        break;
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}
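
/**
 * Return the number of buffers currently queued in the FIFO, or AVERROR_EOF
 * if the FIFO is empty and end of stream was signalled.
 */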
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size/sizeof(AVFilterBufferRef*);
}

static void buf_free(AVFilterBuffer *ptr)
{
    av_free(ptr);
    return;
}

static void set_link_source(AVFilterContext *src, AVFilterLink *link)
{
    link->src       = src;
    link->srcpad    = &(src->output_pads[0]);
    src->outputs[0] = link;
}

static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx)
{
    int ret;
    AVFilterLink * const inlink  = filt_ctx->inputs[0];
    AVFilterLink * const outlink = filt_ctx->outputs[0];

    inlink->format         = abuffer->sample_format;
    inlink->channel_layout = abuffer->channel_layout;
    inlink->planar         = abuffer->packing_format;
    inlink->sample_rate    = abuffer->sample_rate;

    filt_ctx->filter->uninit(filt_ctx);
    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
    if ((ret = filt_ctx->filter->init(filt_ctx, NULL, NULL)) < 0)
        return ret;
    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
        return ret;
    return outlink->srcpad->config_props(outlink);
}
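
/**
 * Open the named filter, link it right after the buffer source and
 * configure it for the current input format.  Used to insert "aresample"
 * or "aconvert" when incoming audio does not match the negotiated output.
 */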
static int insert_filter(BufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
        return ret;

    link->src->outputs[0] = NULL;
    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
        link->src->outputs[0] = link;
        return ret;
    }

    set_link_source(*filt_ctx, link);

    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    return 0;
}

static void remove_filter(AVFilterContext **filt_ctx)
{
    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;

    (*filt_ctx)->outputs[0] = NULL;
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;

    set_link_source(src, outlink);
}

static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
{
    char old_layout_str[16], new_layout_str[16];
    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
                                 -1, link->channel_layout);
    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
                                 -1, ref->audio->channel_layout);
    av_log(ctx, AV_LOG_INFO,
           "Audio input format changed: "
           "%s:%s:%d -> %s:%s:%d, normalizing\n",
           av_get_sample_fmt_name(link->format),
           old_layout_str, (int)link->sample_rate,
           av_get_sample_fmt_name(ref->format),
           new_layout_str, ref->audio->sample_rate);
}
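
/**
 * Queue an audio buffer reference in the source FIFO.
 *
 * If the sample rate, sample format, channel layout or packing differs from
 * the output link, an "aresample" and/or "aconvert" filter is inserted,
 * reconfigured or removed as needed to normalize the input before it
 * reaches the rest of the graph.
 */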
int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
                                        AVFilterBufferRef *samplesref,
                                        int av_unused flags)
{
    BufferSourceContext *abuffer = ctx->priv;
    AVFilterLink *link;
    int ret, logged = 0;

    if (av_fifo_space(abuffer->fifo) < sizeof(samplesref)) {
        av_log(ctx, AV_LOG_ERROR,
               "Buffering limit reached. Please consume some available frames "
               "before adding new ones.\n");
        return AVERROR(EINVAL);
    }

    // Normalize input

    link = ctx->outputs[0];
    if (samplesref->audio->sample_rate != link->sample_rate) {

        log_input_change(ctx, link, samplesref);
        logged = 1;

        abuffer->sample_rate = samplesref->audio->sample_rate;

        if (!abuffer->aresample) {
            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aresample->outputs[0];
            if (samplesref->audio->sample_rate == link->sample_rate)
                remove_filter(&abuffer->aresample);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
                    return ret;
        }
    }

    link = ctx->outputs[0];
    if (samplesref->format                != link->format         ||
        samplesref->audio->channel_layout != link->channel_layout ||
        samplesref->audio->planar         != link->planar) {

        if (!logged) log_input_change(ctx, link, samplesref);

        abuffer->sample_format  = samplesref->format;
        abuffer->channel_layout = samplesref->audio->channel_layout;
        abuffer->packing_format = samplesref->audio->planar;

        if (!abuffer->aconvert) {
            ret = insert_filter(abuffer, link, &abuffer->aconvert, "aconvert");
            if (ret < 0) return ret;
        } else {
            link = abuffer->aconvert->outputs[0];
            if (samplesref->format                == link->format         &&
                samplesref->audio->channel_layout == link->channel_layout &&
                samplesref->audio->planar         == link->planar)
                remove_filter(&abuffer->aconvert);
            else
                if ((ret = reconfigure_filter(abuffer, abuffer->aconvert)) < 0)
                    return ret;
        }
    }

    if (sizeof(samplesref) != av_fifo_generic_write(abuffer->fifo, &samplesref,
                                                    sizeof(samplesref), NULL)) {
        av_log(ctx, AV_LOG_ERROR, "Error while writing to FIFO\n");
        return AVERROR(EINVAL);
    }

    return 0;
}
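
/**
 * Wrap raw audio planes into a buffer reference and queue it; a convenience
 * wrapper around av_asrc_buffer_add_audio_buffer_ref().
 */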
int av_asrc_buffer_add_samples(AVFilterContext *ctx,
                               uint8_t *data[8], int linesize[8],
                               int nb_samples, int sample_rate,
                               int sample_fmt, int64_t channel_layout, int planar,
                               int64_t pts, int av_unused flags)
{
    AVFilterBufferRef *samplesref;

    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
                     data, linesize, AV_PERM_WRITE,
                     nb_samples,
                     sample_fmt, channel_layout, planar);
    if (!samplesref)
        return AVERROR(ENOMEM);

    samplesref->buf->free          = buf_free;
    samplesref->pts                = pts;
    samplesref->audio->sample_rate = sample_rate;

    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
}

int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
                              uint8_t *buf, int buf_size, int sample_rate,
                              int sample_fmt, int64_t channel_layout, int planar,
                              int64_t pts, int av_unused flags)
{
    uint8_t *data[8] = {0};
    int linesize[8];
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);

    av_samples_fill_arrays(data, linesize,
                           buf, nb_channels, nb_samples,
                           sample_fmt, 16);

    return av_asrc_buffer_add_samples(ctx,
                                      data, linesize, nb_samples,
                                      sample_rate,
                                      sample_fmt, channel_layout, planar,
                                      pts, flags);
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_video,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_VIDEO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_video, },
                                            { .name = NULL}},
};

#ifdef CONFIG_ABUFFER_FILTER

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats_audio,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (const AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (const AVFilterPad[]) {{ .name          = "default",
                                              .type          = AVMEDIA_TYPE_AUDIO,
                                              .request_frame = request_frame,
                                              .poll_frame    = poll_frame,
                                              .config_props  = config_output_audio, },
                                            { .name = NULL}},
};

#endif