/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
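/**
 * Minimal usage sketch (illustrative only, not part of this file): it assumes
 * a filter graph has already been configured elsewhere, that buffersrc_ctx
 * points at an instance of the "buffer"/"abuffer" filter defined below, and
 * that decode_next_frame() is a hypothetical decoder helper.
 *
 * @code
 * AVFrame *frame;
 * while ((frame = decode_next_frame()) != NULL) {
 *     // push a copy of the decoded frame into the source FIFO
 *     if (av_buffersrc_write_frame(buffersrc_ctx, frame) < 0)
 *         break;
 * }
 * // a NULL frame signals end of stream to the source
 * av_buffersrc_add_frame(buffersrc_ctx, NULL, 0);
 * @endcode
 */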
#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vsrc_buffer.h"
#include "avcodec.h"

#include "libavutil/audioconvert.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

typedef struct {
    const AVClass *class;
    AVFifoBuffer  *fifo;
    AVRational     time_base;   ///< time_base to set in the output link
    AVRational     frame_rate;  ///< frame_rate to set in the output link
    unsigned       nb_failed_requests;

    /* video only */
    int              w, h;
    enum PixelFormat pix_fmt;
    AVRational       pixel_aspect;
    char            *sws_param;

    /* audio only */
    int                 sample_rate;
    enum AVSampleFormat sample_fmt;
    char               *sample_fmt_str;
    uint64_t            channel_layout;
    char               *channel_layout_str;

    int eof;
} BufferSourceContext;

#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels;

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                  ref->video->w, ref->video->h);
        if (!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                  ref->audio->nb_samples);
        if (!buf)
            return NULL;
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }

    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}

int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);

    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
    return av_buffersrc_add_frame(buffer_filter, frame, 0);
}

int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
{
    BufferSourceContext *c = s->priv;
    AVFilterBufferRef *to_free = NULL;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (s->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                     buf->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }

    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
        to_free = buf = copy_buffer_ref(s, buf);
    if (!buf)
        return -1;

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(to_free);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}

int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
}

unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}
#define OFFSET(x) offsetof(BufferSourceContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption buffer_options[] = {
    { "time_base",    NULL, OFFSET(time_base),    AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "frame_rate",   NULL, OFFSET(frame_rate),   AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "video_size",   NULL, OFFSET(w),            AV_OPT_TYPE_IMAGE_SIZE,          .flags = V },
    { "pix_fmt",      NULL, OFFSET(pix_fmt),      AV_OPT_TYPE_PIXEL_FMT,           .flags = V },
    { "pixel_aspect", NULL, OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL,   { 0 }, 0, INT_MAX, V },
    { "sws_param",    NULL, OFFSET(sws_param),    AV_OPT_TYPE_STRING,              .flags = V },
    { NULL },
};
#undef V

AVFILTER_DEFINE_CLASS(buffer);
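/*
 * Example argument strings accepted by init_video() below (illustrative;
 * concrete values are arbitrary). The first form maps onto buffer_options,
 * the second is the deprecated flat syntax parsed by the sscanf() branch:
 *
 *   "video_size=320x240:pix_fmt=yuv420p:time_base=1/25:pixel_aspect=1/1"
 *   "320:240:yuv420p:1:25:1:1"
 */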
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128], sws_param[256] = "", *colon, *equal;
    int ret, n = 0;

    c->class = &buffer_class;

    if (!args) {
        av_log(ctx, AV_LOG_ERROR, "Arguments required\n");
        return AVERROR(EINVAL);
    }

    colon = strchr(args, ':');
    equal = strchr(args, '=');
    if (equal && (!colon || equal < colon)) {
        av_opt_set_defaults(c);
        ret = av_set_options_string(c, args, "=", ":");
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s\n", args);
            goto fail;
        }
    } else {
        if ((n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                        &c->time_base.num, &c->time_base.den,
                        &c->pixel_aspect.num, &c->pixel_aspect.den, sws_param)) < 7) {
            av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        av_log(ctx, AV_LOG_WARNING, "Flat options syntax is deprecated, use key=value pairs\n");

        if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
            goto fail;
        c->sws_param = av_strdup(sws_param);
        if (!c->sws_param) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
           c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
    return 0;

fail:
    av_opt_free(c);
    return ret;
}

#define A AV_OPT_FLAG_AUDIO_PARAM
static const AVOption abuffer_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,            .flags = A },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,            .flags = A },
    { NULL },
};

AVFILTER_DEFINE_CLASS(abuffer);

static av_cold int init_audio(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
        goto fail;
    }

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
    if (!s->channel_layout) {
        av_log(ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE, "tb:%d/%d samplefmt:%s samplerate: %d "
           "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);

fail:
    av_opt_free(s);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
    av_freep(&s->sws_param);
}

static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts, c->channel_layout);
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}

static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        link->channel_layout = c->channel_layout;
        link->sample_rate    = c->sample_rate;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base  = c->time_base;
    link->frame_rate = c->frame_rate;
    return 0;
}

static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
        ff_draw_slice(link, 0, link->h, 1);
        ff_end_frame(link);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        break;
    default:
        return AVERROR(EINVAL);
    }

    avfilter_unref_buffer(buf);
    return 0;
}

static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size / sizeof(AVFilterBufferRef*);
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_VIDEO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_AUDIO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};