/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */

#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vsrc_buffer.h"
#include "avcodec.h"

#include "libavutil/audioconvert.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

typedef struct {
    const AVClass *class;
    AVFifoBuffer  *fifo;
    AVRational     time_base;     ///< time_base to set in the output link
    AVRational     frame_rate;    ///< frame_rate to set in the output link
    unsigned       nb_failed_requests;

    /* video only */
    int               w, h;
    enum PixelFormat  pix_fmt;
    AVRational        pixel_aspect;
    char             *sws_param;

    /* audio only */
    int sample_rate;
    enum AVSampleFormat sample_fmt;
    char               *sample_fmt_str;
    uint64_t channel_layout;
    char    *channel_layout_str;

    int eof;
} BufferSourceContext;

#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }
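
/* Make a private copy of an incoming buffer reference: allocate a new
 * buffer on the output link, copy the picture or sample data into it and
 * carry over the buffer reference properties. Returns NULL on allocation
 * failure or for an unsupported media type. */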
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels;

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                  ref->video->w, ref->video->h);
        if(!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                  ref->audio->nb_samples);
        if(!buf)
            return NULL;
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}

#if FF_API_VSRC_BUFFER_ADD_FRAME
static int av_vsrc_buffer_add_frame_alt(AVFilterContext *buffer_filter, AVFrame *frame,
                                        int64_t pts, AVRational pixel_aspect)
{
    int64_t orig_pts = frame->pts;
    AVRational orig_sar = frame->sample_aspect_ratio;
    int ret;

    frame->pts = pts;
    frame->sample_aspect_ratio = pixel_aspect;
    if ((ret = av_buffersrc_write_frame(buffer_filter, frame)) < 0)
        return ret;
    frame->pts = orig_pts;
    frame->sample_aspect_ratio = orig_sar;

    return 0;
}
#endif
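
/* Wrap an AVFrame into a buffer reference of the output media type and
 * queue it with av_buffersrc_add_ref(); a NULL frame signals EOF. data[0]
 * of the temporary reference is cleared before unreferencing it so the
 * frame data, which still belongs to the caller, is not freed here. */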
int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);
    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
    return av_buffersrc_add_frame(buffer_filter, frame, 0);
}
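
/* Queue one buffer reference on the source. A NULL buf marks EOF and any
 * later call is rejected. The FIFO is grown when full, the frame parameters
 * are checked against the configured ones unless
 * AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT is set, and the data is copied unless
 * AV_BUFFERSRC_FLAG_NO_COPY is set. */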
int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
{
    BufferSourceContext *c = s->priv;
    AVFilterBufferRef *to_free = NULL;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (s->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                     buf->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }
    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
        to_free = buf = copy_buffer_ref(s, buf);
    if(!buf)
        return -1;
    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(to_free);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}

int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
}

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

#define OFFSET(x) offsetof(BufferSourceContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption video_options[] = {
    { "time_base",    NULL, OFFSET(time_base),    AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "frame_rate",   NULL, OFFSET(frame_rate),   AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "video_size",   NULL, OFFSET(w),            AV_OPT_TYPE_IMAGE_SIZE, .flags = V },
    { "pix_fmt",      NULL, OFFSET(pix_fmt),      AV_OPT_TYPE_PIXEL_FMT,  .flags = V },
    { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "sws_param",    NULL, OFFSET(sws_param),    AV_OPT_TYPE_STRING,     .flags = V },
    { NULL },
};
#undef V

static const AVClass vbuffer_class = {
    .class_name = "vbuffer source",
    .item_name  = av_default_item_name,
    .option     = video_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
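
/* Parse the video source arguments, either as key=value option pairs or as
 * the deprecated flat "width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param]"
 * syntax, and allocate the FIFO holding the queued buffer references. */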
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128], sws_param[256] = "", *colon, *equal;
    int ret, n = 0;

    c->class = &vbuffer_class;

    if (!args) {
        av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
        return AVERROR(EINVAL);
    }
    colon = strchr(args, ':');
    equal = strchr(args, '=');
    if (equal && (!colon || equal < colon)) {
        av_opt_set_defaults(c);
        ret = av_set_options_string(c, args, "=", ":");
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s.\n", args);
            goto fail;
        }
    } else {
        if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                        &c->time_base.num, &c->time_base.den,
                        &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) {
            av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs.\n");

        if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
            goto fail;
        c->sws_param = av_strdup(sws_param);
        if (!c->sws_param) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
           c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
    return 0;

fail:
    av_opt_free(c);
    return ret;
}

#define A AV_OPT_FLAG_AUDIO_PARAM
static const AVOption audio_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,   .flags = A },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,   .flags = A },
    { NULL },
};

static const AVClass abuffer_class = {
    .class_name = "abuffer source",
    .item_name  = av_default_item_name,
    .option     = audio_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
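
/* Parse the audio source options, resolve the sample format and channel
 * layout names, allocate the FIFO and default the time base to
 * 1/sample_rate when none was given. */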
static av_cold int init_audio(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s.\n", args);
        goto fail;
    }

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
    if (!s->channel_layout) {
        av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE, "tb:%d/%d samplefmt:%s samplerate: %d "
           "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);

fail:
    av_opt_free(s);
    return ret;
}
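
/* Release everything still queued in the FIFO, then the FIFO itself and
 * the sws_param string. */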
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    av_freep(&s->sws_param);
}
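
/* Advertise exactly the configured formats on the output: the single pixel
 * format for video, or the single sample format, sample rate and channel
 * layout for audio. */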
static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts, c->channel_layout);
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
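
/* Propagate the configured frame geometry or audio parameters, together
 * with the time base and frame rate, to the output link. */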
static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        link->channel_layout = c->channel_layout;
        link->sample_rate    = c->sample_rate;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base  = c->time_base;
    link->frame_rate = c->frame_rate;
    return 0;
}
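
/* Pop one buffer from the FIFO and send it downstream: start_frame /
 * draw_slice / end_frame for video, filter_samples for audio. Returns
 * AVERROR_EOF once the queue is drained after EOF, or AVERROR(EAGAIN)
 * (and counts a failed request) when the queue is merely empty. */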
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
        ff_draw_slice(link, 0, link->h, 1);
        ff_end_frame(link);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        break;
    default:
        return AVERROR(EINVAL);
    }

    avfilter_unref_buffer(buf);
    return 0;
}
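
/* Report how many buffer references are currently queued, or EOF when the
 * queue is empty and EOF has been signalled. */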
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size/sizeof(AVFilterBufferRef*);
}
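
/* The video "buffer" source and the audio "abuffer" source share the same
 * private context, output callbacks and uninit; they differ only in their
 * init function and output media type. */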
AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init      = init_video,
    .uninit    = uninit,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = request_frame,
                                    .poll_frame    = poll_frame,
                                    .config_props  = config_props, },
                                  { .name = NULL}},
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init      = init_audio,
    .uninit    = uninit,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_AUDIO,
                                    .request_frame = request_frame,
                                    .poll_frame    = poll_frame,
                                    .config_props  = config_props, },
                                  { .name = NULL}},
};