/*
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * memory buffer source filter
 */

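/*
 * Rough usage sketch (illustrative only, not part of this file): an
 * application typically creates the "buffer" source with
 * avfilter_graph_create_filter() and then pushes decoded frames into it
 * through the av_buffersrc_* calls below.  The names graph, frame, w, h and
 * pix_fmt are placeholders.
 *
 *     AVFilterContext *src;
 *     char args[256];
 *     // width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param]
 *     snprintf(args, sizeof(args), "%d:%d:%d:1:25:1:1", w, h, pix_fmt);
 *     avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"),
 *                                  "in", args, NULL, graph);
 *     ...
 *     av_buffersrc_write_frame(src, frame);   // copies the frame's data
 *     av_buffersrc_add_ref(src, NULL, 0);     // signal EOF
 */
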
#include "audio.h"
#include "avfilter.h"
#include "buffersrc.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vsrc_buffer.h"
#include "avcodec.h"

#include "libavutil/audioconvert.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

typedef struct {
    const AVClass *class;
    AVFifoBuffer  *fifo;
    AVRational     time_base;     ///< time_base to set in the output link
    unsigned       nb_failed_requests;

    /* video only */
    int              h, w;
    enum PixelFormat pix_fmt;
    AVRational       pixel_aspect;
    char             sws_param[256];

    /* audio only */
    int                 sample_rate;
    enum AVSampleFormat sample_fmt;
    char               *sample_fmt_str;
    uint64_t            channel_layout;
    char               *channel_layout_str;

    int eof;
} BufferSourceContext;

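/* Both macros reject mid-stream parameter changes: the source advertises a
 * single, fixed format on its output link at query_formats() time, so every
 * buffer pushed later has to match the configured parameters. */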
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
    if (c->w != width || c->h != height || c->pix_fmt != format) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
    if (c->sample_fmt != format || c->sample_rate != srate ||\
        c->channel_layout != ch_layout) {\
        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
        return AVERROR(EINVAL);\
    }

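/* Copy an incoming buffer reference into a buffer allocated on the output
 * link, so the data handed to the filtergraph is owned by the graph rather
 * than by the caller. */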
static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
                                          AVFilterBufferRef *ref)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *buf;
    int channels;

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                        ref->video->w, ref->video->h);
        if (!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                  ref->audio->nb_samples);
        if (!buf)
            return NULL;
        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}

#if FF_API_VSRC_BUFFER_ADD_FRAME
static int av_vsrc_buffer_add_frame_alt(AVFilterContext *buffer_filter, AVFrame *frame,
                                        int64_t pts, AVRational pixel_aspect)
{
    int64_t orig_pts = frame->pts;
    AVRational orig_sar = frame->sample_aspect_ratio;
    int ret;

    frame->pts = pts;
    frame->sample_aspect_ratio = pixel_aspect;
    if ((ret = av_buffersrc_write_frame(buffer_filter, frame)) < 0)
        return ret;
    frame->pts = orig_pts;
    frame->sample_aspect_ratio = orig_sar;

    return 0;
}
#endif

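/* Wrap an AVFrame in an AVFilterBufferRef and queue it.  A NULL frame signals
 * EOF.  Unless AV_BUFFERSRC_FLAG_NO_COPY is set, av_buffersrc_add_ref() makes
 * its own copy of the data, so the caller keeps ownership of the frame. */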
int av_buffersrc_add_frame(AVFilterContext *buffer_src,
                           const AVFrame *frame, int flags)
{
    AVFilterBufferRef *picref;
    int ret;

    if (!frame) /* NULL for EOF */
        return av_buffersrc_add_ref(buffer_src, NULL, flags);

    switch (buffer_src->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    case AVMEDIA_TYPE_AUDIO:
        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
        break;
    default:
        return AVERROR(ENOSYS);
    }
    if (!picref)
        return AVERROR(ENOMEM);

    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
    picref->buf->data[0] = NULL;
    avfilter_unref_buffer(picref);

    return ret;
}

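/* Convenience wrapper around av_buffersrc_add_frame() with default flags
 * (the frame data is copied). */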
int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
    return av_buffersrc_add_frame(buffer_filter, frame, 0);
}

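/* Core entry point: optionally check the buffer against the configured stream
 * parameters, optionally copy it, and push it onto the FIFO that
 * request_frame() later drains.  A NULL buf marks end of stream. */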
int av_buffersrc_add_ref(AVFilterContext *s, AVFilterBufferRef *buf, int flags)
{
    BufferSourceContext *c = s->priv;
    AVFilterBufferRef *to_free = NULL;
    int ret;

    if (!buf) {
        c->eof = 1;
        return 0;
    } else if (c->eof)
        return AVERROR(EINVAL);

    if (!av_fifo_space(c->fifo) &&
        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
                                         sizeof(buf))) < 0)
        return ret;

    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
        switch (s->outputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
            break;
        case AVMEDIA_TYPE_AUDIO:
            CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
                                     buf->format);
            break;
        default:
            return AVERROR(EINVAL);
        }
    }
    if (!(flags & AV_BUFFERSRC_FLAG_NO_COPY))
        to_free = buf = copy_buffer_ref(s, buf);
    if (!buf)
        return -1;

    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
        avfilter_unref_buffer(to_free);
        return ret;
    }
    c->nb_failed_requests = 0;

    return 0;
}

int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
{
    return av_buffersrc_add_ref(s, buf, AV_BUFFERSRC_FLAG_NO_COPY);
}

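/* Number of request_frame() calls that found the FIFO empty since the last
 * successful push; callers can use this as a hint that more input is needed. */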
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
{
    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
}

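/* The video source is configured with a colon-separated argument string:
 * width:height:pix_fmt:tb_num:tb_den:sar_num:sar_den[:sws_param],
 * e.g. "320:240:yuv420p:1:25:1:1". */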
static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *c = ctx->priv;
    char pix_fmt_str[128];
    int ret, n = 0;
    *c->sws_param = 0;

    if (!args ||
        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
                    &c->time_base.num, &c->time_base.den,
                    &c->pixel_aspect.num, &c->pixel_aspect.den, c->sws_param)) < 7) {
        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
        return ret;

    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
           c->time_base.num, c->time_base.den,
           c->pixel_aspect.num, c->pixel_aspect.den, c->sws_param);
    return 0;
}

#define OFFSET(x) offsetof(BufferSourceContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
static const AVOption audio_options[] = {
    { "time_base",      NULL, OFFSET(time_base),          AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
    { "sample_rate",    NULL, OFFSET(sample_rate),        AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
    { "sample_fmt",     NULL, OFFSET(sample_fmt_str),     AV_OPT_TYPE_STRING,            .flags = A },
    { "channel_layout", NULL, OFFSET(channel_layout_str), AV_OPT_TYPE_STRING,            .flags = A },
    { NULL },
};

static const AVClass abuffer_class = {
    .class_name = "abuffer source",
    .item_name  = av_default_item_name,
    .option     = audio_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

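/* The audio source is configured through key=value pairs separated by ':'
 * (see audio_options above), e.g.
 * "sample_rate=44100:sample_fmt=s16:channel_layout=stereo". */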
static av_cold int init_audio(AVFilterContext *ctx, const char *args, void *opaque)
{
    BufferSourceContext *s = ctx->priv;
    int ret = 0;

    s->class = &abuffer_class;
    av_opt_set_defaults(s);

    if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s.\n", args);
        goto fail;
    }

    s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
    if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
               s->sample_fmt_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    s->channel_layout = av_get_channel_layout(s->channel_layout_str);
    if (!s->channel_layout) {
        av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (!s->time_base.num)
        s->time_base = (AVRational){1, s->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE, "tb:%d/%d samplefmt:%s samplerate: %d "
           "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
           s->sample_rate, s->channel_layout_str);

fail:
    av_opt_free(s);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    while (s->fifo && av_fifo_size(s->fifo)) {
        AVFilterBufferRef *buf;
        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
        avfilter_unref_buffer(buf);
    }
    av_fifo_free(s->fifo);
    s->fifo = NULL;
}

static int query_formats(AVFilterContext *ctx)
{
    BufferSourceContext *c = ctx->priv;
    AVFilterChannelLayouts *channel_layouts = NULL;
    AVFilterFormats *formats = NULL;
    AVFilterFormats *samplerates = NULL;

    switch (ctx->outputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_add_format(&formats, c->pix_fmt);
        ff_set_common_formats(ctx, formats);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_add_format(&formats, c->sample_fmt);
        ff_set_common_formats(ctx, formats);

        ff_add_format(&samplerates, c->sample_rate);
        ff_set_common_samplerates(ctx, samplerates);

        ff_add_channel_layout(&channel_layouts, c->channel_layout);
        ff_set_common_channel_layouts(ctx, channel_layouts);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}

static int config_props(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        link->w = c->w;
        link->h = c->h;
        link->sample_aspect_ratio = c->pixel_aspect;
        break;
    case AVMEDIA_TYPE_AUDIO:
        link->channel_layout = c->channel_layout;
        link->sample_rate    = c->sample_rate;
        break;
    default:
        return AVERROR(EINVAL);
    }

    link->time_base = c->time_base;
    return 0;
}

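/* Pop one buffered frame from the FIFO and send it down the output link:
 * video through the start_frame/draw_slice/end_frame sequence, audio through
 * ff_filter_samples().  Returns EAGAIN while the FIFO is empty and
 * AVERROR_EOF once EOF was signalled and the queue has drained. */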
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!av_fifo_size(c->fifo)) {
        if (c->eof)
            return AVERROR_EOF;
        c->nb_failed_requests++;
        return AVERROR(EAGAIN);
    }
    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
        ff_draw_slice(link, 0, link->h, 1);
        ff_end_frame(link);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
        break;
    default:
        return AVERROR(EINVAL);
    }

    avfilter_unref_buffer(buf);
    return 0;
}

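/* Report how many frames are immediately available on the output link. */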
static int poll_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    int size = av_fifo_size(c->fifo);
    if (!size && c->eof)
        return AVERROR_EOF;
    return size / sizeof(AVFilterBufferRef*);
}

AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init      = init_video,
    .uninit    = uninit,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = request_frame,
                                    .poll_frame    = poll_frame,
                                    .config_props  = config_props, },
                                  { .name = NULL }},
};

AVFilter avfilter_asrc_abuffer = {
    .name          = "abuffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init      = init_audio,
    .uninit    = uninit,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_AUDIO,
                                    .request_frame = request_frame,
                                    .poll_frame    = poll_frame,
                                    .config_props  = config_props, },
                                  { .name = NULL }},
};