/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */
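
/**
 * @par Example
 * A minimal, illustrative sketch (not part of the original file) of how an
 * application might feed decoded video frames into the "buffer" source.
 * Identifiers such as dec_ctx, graph and frame are assumed to exist in the
 * caller, and error checking is omitted.
 * @code
 * AVFilterContext *src = NULL;
 * char args[256];
 *
 * // Parameters follow the string parsed by init_video():
 * // width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den
 * snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:1:1",
 *          dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
 *          dec_ctx->time_base.num, dec_ctx->time_base.den);
 * avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"),
 *                              "in", args, NULL, graph);
 *
 * // ... link and configure the rest of the graph, then per decoded frame:
 * av_buffersrc_write_frame(src, frame);
 * // Signal end of stream when done:
 * av_buffersrc_write_frame(src, NULL);
 * @endcode
 */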

#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vsrc_buffer.h"

#include "libavutil/audioconvert.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
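
/**
 * Private context shared by the video "buffer" and audio "abuffer" sources.
 */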
typedef struct {
    const AVClass    *class;
    AVFifoBuffer     *fifo;
    AVRational        time_base;     ///< time_base to set in the output link

    /* video only */
    int               h, w;
    enum PixelFormat  pix_fmt;
    AVRational        pixel_aspect;

    /* audio only */
    int sample_rate;
    enum AVSampleFormat sample_fmt;
    char               *sample_fmt_str;
    uint64_t channel_layout;
    char    *channel_layout_str;

    int eof;
} BufferSourceContext;
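
/* Both sources refuse mid-stream parameter changes: if an incoming frame or
 * buffer does not match the format configured at init time, an error is
 * returned instead of reconfiguring the link. */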
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }
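
/* Deprecated wrapper kept under FF_API_VSRC_BUFFER_ADD_FRAME: it temporarily
 * overrides the frame's pts and sample aspect ratio, forwards the frame to
 * av_buffersrc_write_frame(), then restores the original values. */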
#if FF_API_VSRC_BUFFER_ADD_FRAME
int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
                             int64_t pts, AVRational pixel_aspect)
{
    int64_t orig_pts    = frame->pts;
    AVRational orig_sar = frame->sample_aspect_ratio;
    int ret;

    frame->pts                 = pts;
    frame->sample_aspect_ratio = pixel_aspect;

    if ((ret = av_buffersrc_write_frame(buffer_filter, frame)) < 0)
        return ret;

    frame->pts                 = orig_pts;
    frame->sample_aspect_ratio = orig_sar;

    return 0;
}
#endif
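
/**
 * Copy the data of an AVFrame into a buffer owned by the filter and queue it
 * in the FIFO; a NULL frame marks end of stream. Frames whose properties
 * differ from those given at init time are rejected.
 */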
int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterBufferRef *buf;
    int ret;

    if (!frame) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    switch (buffer_filter->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        CHECK_VIDEO_PARAM_CHANGE(buffer_filter, c, frame->width, frame->height,
                                 frame->format);
        buf = ff_get_video_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
                                  c->w, c->h);
        av_image_copy(buf->data, buf->linesize, frame->data, frame->linesize,
                      c->pix_fmt, c->w, c->h);
        break;
    case AVMEDIA_TYPE_AUDIO:
        CHECK_AUDIO_PARAM_CHANGE(buffer_filter, c, frame->sample_rate, frame->channel_layout,
                                 frame->format);
        buf = ff_get_audio_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
                                  frame->nb_samples);
        av_samples_copy(buf->extended_data, frame->extended_data,
                        0, 0, frame->nb_samples,
                        av_get_channel_layout_nb_channels(frame->channel_layout),
                        frame->format);
        break;
    default:
        return AVERROR(EINVAL);
    }

    avfilter_copy_frame_props(buf, frame);

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(buf);
        return ret;
    }

    return 0;
}
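
/**
 * Queue an already allocated AVFilterBufferRef without copying its data; on
 * success the filter takes ownership of the reference. A NULL buf marks end
 * of stream.
 */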
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    BufferSourceContext *c = s->priv;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    switch (s->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
        break;
    case AVMEDIA_TYPE_AUDIO:
        CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                 buf->format);
        break;
    default:
        return AVERROR(EINVAL);
    }

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0)
        return ret;

    return 0;
}
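
/* Parse the video source arguments:
 * width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den
 * where pix_fmt may be a pixel format name or a numeric value. */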
static av_cold int init_video(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int n = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }
    if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) {
        char *tail;
        c->pix_fmt = strtol(pix_fmt_str, &tail, 10);
        if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) {
            av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str);
            return AVERROR(EINVAL);
        }
    }

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name);
    return 0;
}

#define OFFSET(x) offsetof(BufferSourceContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
static const AVOption audio_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,            .flags = A },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,            .flags = A },
    { NULL },
};

static const AVClass abuffer_class = {
    .class_name = "abuffer source",
    .item_name  = av_default_item_name,
    .option     = audio_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
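
/* The audio source is configured through the AVOptions above (time_base,
 * sample_rate, sample_fmt, channel_layout), parsed from a key=value string.
 * The time base defaults to 1/sample_rate when not given. */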
static av_cold int init_audio(AVFilterContext *ctx, const char *args)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s.\n", args);
        goto fail;
    }

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
    if (!s->channel_layout) {
        av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE, "tb:%d/%d samplefmt:%s samplerate: %d "
           "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);

fail:
    av_opt_free(s);
    return ret;
}
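
/* Free any buffers still queued in the FIFO, then the FIFO itself. */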
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
}
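
/* Advertise exactly the formats given at init time: a single pixel format for
 * video, or a single sample format, sample rate and channel layout for audio. */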
static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts, c->channel_layout);
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
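
/* Propagate the configured dimensions or sample parameters, plus the time
 * base, to the output link. */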
static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        link->channel_layout = c->channel_layout;
        link->sample_rate    = c->sample_rate;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base = c->time_base;
    return 0;
}
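
/* Pop one queued buffer and push it downstream. Returns AVERROR_EOF once the
 * end-of-stream flag is set and the queue is drained, or EAGAIN if the queue
 * is merely empty. */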
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
        ff_draw_slice(link, 0, link->h, 1);
        ff_end_frame(link);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        break;
    default:
        return AVERROR(EINVAL);
    }

    avfilter_unref_buffer(buf);
    return 0;
}
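
/* Report how many buffers are currently queued (AVERROR_EOF once drained
 * after end of stream). */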
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size / sizeof(AVFilterBufferRef*);
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_video,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_VIDEO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init          = init_audio,
    .uninit        = uninit,

    .inputs        = (AVFilterPad[]) {{ .name = NULL }},
    .outputs       = (AVFilterPad[]) {{ .name          = "default",
                                        .type          = AVMEDIA_TYPE_AUDIO,
                                        .request_frame = request_frame,
                                        .poll_frame    = poll_frame,
                                        .config_props  = config_props, },
                                      { .name = NULL }},
};