/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */

#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "avcodec.h"

#include "libavutil/audioconvert.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

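/* Private state shared by the "buffer" (video) and "abuffer" (audio) source
 * filters: a FIFO of pending AVFilterBufferRefs plus the stream parameters
 * that incoming frames are checked against. */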
typedef struct {
    const AVClass    *class;
    AVFifoBuffer     *fifo;
    AVRational        time_base;     ///< time_base to set in the output link
    AVRational        frame_rate;    ///< frame_rate to set in the output link
    unsigned          nb_failed_requests;

    /* video only */
    int               w, h;
    enum PixelFormat  pix_fmt;
    AVRational        pixel_aspect;
    char             *sws_param;

    /* audio only */
    int                 sample_rate;
    enum AVSampleFormat sample_fmt;
    char               *sample_fmt_str;
    uint64_t            channel_layout;
    char               *channel_layout_str;

    int eof;
} BufferSourceContext;

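/* These macros reject frames whose geometry/pixel format (video) or sample
 * rate/channel layout/sample format (audio) differ from what the source was
 * configured with; mid-stream parameter changes are not supported here. */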
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

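/* Make a deep copy of an incoming buffer reference into a buffer allocated
 * on the output link, so the filter graph owns its own copy of the data. */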
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels;

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                  ref->video->w, ref->video->h);
        if (!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                  ref->audio->nb_samples);
        if (!buf)
            return NULL;
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }

    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}

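/* Wrap an AVFrame in an AVFilterBufferRef and queue it; a NULL frame marks
 * end of stream. The temporary ref's data pointer is cleared before
 * unreferencing, which appears intended to keep the caller's frame data from
 * being freed here. */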
int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);

    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

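/* Convenience wrapper around av_buffersrc_add_frame() with no flags.
 *
 * Illustrative caller-side sketch (not part of this file); assumes "src_ctx"
 * is an AVFilterContext created from the "buffer"/"abuffer" filter and
 * decode_next_frame() is a hypothetical helper producing decoded AVFrames:
 *
 *     while (decode_next_frame(dec_ctx, frame) >= 0) {
 *         if (av_buffersrc_write_frame(src_ctx, frame) < 0)
 *             break;                            // feeding the graph failed
 *     }
 *     av_buffersrc_write_frame(src_ctx, NULL);  // NULL frame signals EOF
 */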
int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
    return av_buffersrc_add_frame(buffer_filter, frame, 0);
}

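/* Core entry point: validate the incoming buffer against the configured
 * stream parameters (unless AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT is set),
 * optionally deep-copy it (unless AV_BUFFERSRC_FLAG_NO_COPY), and push it
 * onto the FIFO, growing the FIFO if needed. A NULL buf marks EOF. */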
int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
{
    BufferSourceContext *c = s->priv;
    AVFilterBufferRef *to_free = NULL;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (s->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                     buf->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }
    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
        to_free = buf = copy_buffer_ref(s, buf);
    if (!buf)
        return -1;

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(to_free);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}

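/* Queue an existing buffer ref directly, skipping the deep copy.
 * av_buffersrc_get_nb_failed_requests() below reports how many times the
 * output link asked for a frame while the FIFO was empty. */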
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
}

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

#define OFFSET(x) offsetof(BufferSourceContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM

static const AVOption buffer_options[] = {
    { "time_base",    NULL, OFFSET(time_base),    AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "frame_rate",   NULL, OFFSET(frame_rate),   AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "video_size",   NULL, OFFSET(w),            AV_OPT_TYPE_IMAGE_SIZE,           .flags = V },
    { "pix_fmt",      NULL, OFFSET(pix_fmt),      AV_OPT_TYPE_PIXEL_FMT,            .flags = V },
    { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "sws_param",    NULL, OFFSET(sws_param),    AV_OPT_TYPE_STRING,               .flags = V },
    { NULL },
};
#undef V

AVFILTER_DEFINE_CLASS(buffer);

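/* Parse the video source arguments. Either the newer key=value option syntax
 * or the deprecated flat "w:h:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param]"
 * form is accepted, then the internal FIFO of buffer refs is allocated. */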
static av_cold int init_video(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128], sws_param[256] = "", *colon, *equal;
    int ret, n = 0;

    c->class = &buffer_class;

    if (!args) {
        av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
        return AVERROR(EINVAL);
    }
    colon = strchr(args, ':');
    equal = strchr(args, '=');
    if (equal && (!colon || equal < colon)) {
        av_opt_set_defaults(c);
        ret = av_set_options_string(c, args, "=", ":");
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s\n", args);
            goto fail;
        }
    } else {
        if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                        &c->time_base.num, &c->time_base.den,
                        &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) {
            av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs\n");

        if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
            goto fail;
        c->sws_param = av_strdup(sws_param);
        if (!c->sws_param) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
           c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
    return 0;

fail:
    av_opt_free(c);
    return ret;
}

#define A AV_OPT_FLAG_AUDIO_PARAM

static const AVOption abuffer_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,             .flags = A },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,             .flags = A },
    { NULL },
};

AVFILTER_DEFINE_CLASS(abuffer);

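/* Parse the audio source arguments (key=value only), resolve the sample
 * format and channel layout strings, allocate the FIFO, and default the
 * time base to 1/sample_rate when none was given. */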
static av_cold int init_audio(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
        goto fail;
    }

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
    if (!s->channel_layout) {
        av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_INFO,
           "tb:%d/%d samplefmt:%s samplerate:%d chlayout:%s\n",
           s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);

fail:
    av_opt_free(s);
    return ret;
}

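/* Drain and free any buffer refs still queued in the FIFO, then release the
 * FIFO itself and the sws_param string. */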
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    av_freep(&s->sws_param);
}

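/* Advertise exactly one pixel format (video) or one sample format, sample
 * rate and channel layout (audio): whatever the source was configured with. */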
static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts, c->channel_layout);
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}

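/* Propagate the configured stream parameters to the output link. */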
static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        link->channel_layout = c->channel_layout;
        link->sample_rate    = c->sample_rate;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base  = c->time_base;
    link->frame_rate = c->frame_rate;
    return 0;
}

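/* Pop one buffer ref from the FIFO and send it downstream; returns EAGAIN
 * (and counts a failed request) when the FIFO is empty, or AVERROR_EOF once
 * EOF has been signalled and the FIFO has drained. */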
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
        ff_draw_slice(link, 0, link->h, 1);
        ff_end_frame(link);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        break;
    default:
        return AVERROR(EINVAL);
    }

    avfilter_unref_buffer(buf);
    return 0;
}

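/* Report how many frames are immediately available, or AVERROR_EOF when the
 * stream has ended and the FIFO is empty. */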
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size / sizeof(AVFilterBufferRef*);
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_VIDEO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_AUDIO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};