/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * memory buffer source filter
 */
#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "avcodec.h"

#include "libavutil/audioconvert.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
typedef struct {
    const AVClass *class;
    AVFifoBuffer  *fifo;
    AVRational     time_base;     ///< time_base to set in the output link
    AVRational     frame_rate;    ///< frame_rate to set in the output link
    unsigned       nb_failed_requests;
    unsigned       warning_limit;

    /* video only */
    int w, h;
    enum PixelFormat pix_fmt;
    AVRational pixel_aspect;
    char      *sws_param;

    /* audio only */
    int sample_rate;
    enum AVSampleFormat sample_fmt;
    char               *sample_fmt_str;
    uint64_t channel_layout;
    char    *channel_layout_str;

    int eof;
} BufferSourceContext;
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }
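
/* Make a private, writable copy of a buffer reference on the output link:
 * a new buffer is allocated, the pixel or sample data is copied into it and
 * the reference properties are duplicated. Returns NULL on allocation failure
 * or for an unsupported media type. */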
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels;

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                  ref->video->w, ref->video->h);
        if (!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                  ref->audio->nb_samples);
        if (!buf)
            return NULL;
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }

    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}
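
/* Queue a decoded AVFrame in the buffer source. The frame is wrapped in a
 * temporary buffer reference, handed to av_buffersrc_add_ref() (which copies
 * the data unless AV_BUFFERSRC_FLAG_NO_COPY is given), and the wrapper is then
 * released without freeing the caller's frame data. A NULL frame marks EOF. */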
int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}
int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
    return av_buffersrc_add_frame(buffer_filter, frame, 0);
}
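
/* Queue a buffer reference in the source FIFO. Unless
 * AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT is set, the frame parameters must match
 * the ones the source was configured with; unless AV_BUFFERSRC_FLAG_NO_COPY is
 * set, the data is copied first. The FIFO grows on demand, and a warning is
 * logged when the number of queued buffers reaches the warning limit.
 * A NULL buf marks EOF; adding buffers after EOF is rejected. */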
int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
{
    BufferSourceContext *c = s->priv;
    AVFilterBufferRef *to_free = NULL;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (s->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                     buf->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }
    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
        to_free = buf = copy_buffer_ref(s, buf);
    if (!buf)
        return -1;

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(to_free);
        return ret;
    }
    c->nb_failed_requests = 0;
    if (c->warning_limit &&
        av_fifo_size(c->fifo) / sizeof(buf) >= c->warning_limit) {
        av_log(s, AV_LOG_WARNING,
               "%d buffers queued in %s, something may be wrong.\n",
               c->warning_limit,
               (char *)av_x_if_null(s->name, s->filter->name));
        c->warning_limit *= 10;
    }

    return 0;
}
#ifdef FF_API_BUFFERSRC_BUFFER
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
}
#endif
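
/* Number of times request_frame() was called on the output while the FIFO was
 * empty (and EOF was not reached); reset whenever a buffer is successfully
 * queued. */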
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
#define OFFSET(x) offsetof(BufferSourceContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM

static const AVOption buffer_options[] = {
    { "time_base",    NULL, OFFSET(time_base),    AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "frame_rate",   NULL, OFFSET(frame_rate),   AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "video_size",   NULL, OFFSET(w),            AV_OPT_TYPE_IMAGE_SIZE, .flags = V },
    { "pix_fmt",      NULL, OFFSET(pix_fmt),      AV_OPT_TYPE_PIXEL_FMT,  .flags = V },
    { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "sws_param",    NULL, OFFSET(sws_param),    AV_OPT_TYPE_STRING,     .flags = V },
    { NULL },
};
#undef V

AVFILTER_DEFINE_CLASS(buffer);
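
/* init_video() accepts either key=value pairs matching buffer_options above
 * (for example something like
 * "video_size=320x240:pix_fmt=yuv410p:time_base=1/24:pixel_aspect=1/1"),
 * or the deprecated flat syntax parsed by the sscanf() below:
 * width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param]. */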
static av_cold int init_video(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128], sws_param[256] = "", *colon, *equal;
    int ret, n = 0;

    c->class = &buffer_class;

    if (!args) {
        av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
        return AVERROR(EINVAL);
    }
    colon = strchr(args, ':');
    equal = strchr(args, '=');
    if (equal && (!colon || equal < colon)) {
        av_opt_set_defaults(c);
        ret = av_set_options_string(c, args, "=", ":");
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s\n", args);
            goto fail;
        }
    } else {
        if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                        &c->time_base.num, &c->time_base.den,
                        &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) {
            av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs\n");

        if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
            goto fail;
        c->sws_param = av_strdup(sws_param);
        if (!c->sws_param) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
           c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
    c->warning_limit = 100;
    return 0;

fail:
    av_opt_free(c);
    return ret;
}
#define A AV_OPT_FLAG_AUDIO_PARAM

static const AVOption abuffer_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,   .flags = A },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,   .flags = A },
    { NULL },
};

AVFILTER_DEFINE_CLASS(abuffer);
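
/* init_audio() accepts only key=value pairs matching abuffer_options above,
 * for example something like
 * "time_base=1/44100:sample_rate=44100:sample_fmt=s16:channel_layout=stereo". */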
static av_cold int init_audio(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
        goto fail;
    }

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
    if (!s->channel_layout) {
        av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_INFO,
           "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
           s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);
    s->warning_limit = 100;

fail:
    av_opt_free(s);
    return ret;
}
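
/* Release any buffer references still queued in the FIFO, then the FIFO
 * itself and the sws_param string. */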
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    av_freep(&s->sws_param);
}
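
/* Offer downstream exactly the parameters this source was configured with:
 * the pixel format for video, or the sample format, sample rate and channel
 * layout for audio. */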
static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts, c->channel_layout);
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
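
/* Propagate the configured stream parameters to the output link. */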
static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        link->channel_layout = c->channel_layout;
        link->sample_rate    = c->sample_rate;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base  = c->time_base;
    link->frame_rate = c->frame_rate;
    return 0;
}
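
/* Pull callback for the output link: pop one buffer from the FIFO and send it
 * downstream as a video frame or as audio samples. Returns AVERROR(EAGAIN)
 * when nothing is queued yet, or AVERROR_EOF once the queue has drained after
 * EOF. */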
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;
    int ret = 0;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
        ff_draw_slice(link, 0, link->h, 1);
        ff_end_frame(link);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ret = ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        break;
    default:
        return AVERROR(EINVAL);
    }

    avfilter_unref_buffer(buf);
    return ret;
}
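
/* Report how many frames can be pulled immediately; AVERROR_EOF once the
 * queue is empty after EOF has been signalled. */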
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size / sizeof(AVFilterBufferRef*);
}
AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_VIDEO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};
AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_AUDIO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};