/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "framesync.h"
#include "video.h"
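
/* Shared private context for the "mix" and "tmix" filters: parsed per-input
 * weights, the normalization factor, per-plane geometry, and the frame
 * pointers used by tmix as a sliding window and by mix via framesync. */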
typedef struct MixContext {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    char *weights_str;
    int nb_inputs;
    int duration;
    float *weights;
    float scale;
    float wfactor;

    int tmix;
    int nb_frames;

    int depth;
    int max;
    int nb_planes;
    int linesize[4];
    int height[4];

    AVFrame **frames;
    FFFrameSync fs;
} MixContext;
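
/* Accept any software pixel format that is not bitstream-, palette- or
 * hwaccel-based. */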
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    int ret;

    ret = ff_formats_pixdesc_filter(&formats, 0,
                                    AV_PIX_FMT_FLAG_BITSTREAM |
                                    AV_PIX_FMT_FLAG_PAL |
                                    AV_PIX_FMT_FLAG_HWACCEL);
    if (ret < 0)
        return ret;
    return ff_set_common_formats(ctx, formats);
}
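
/* Parse the weights string and, for the "mix" filter, create one input pad per
 * requested input. Unspecified weights repeat the last parsed value; the
 * normalization factor defaults to 1/sum(weights) unless "scale" is set. */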
static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i, ret, last = 0;

    s->tmix = !strcmp(ctx->filter->name, "tmix");

    s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    s->weights = av_calloc(s->nb_inputs, sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);

    if (!s->tmix) {
        for (i = 0; i < s->nb_inputs; i++) {
            AVFilterPad pad = { 0 };

            pad.type = AVMEDIA_TYPE_VIDEO;
            pad.name = av_asprintf("input%d", i);
            if (!pad.name)
                return AVERROR(ENOMEM);

            if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
                av_freep(&pad.name);
                return ret;
            }
        }
    }

    p = s->weights_str;
    for (i = 0; i < s->nb_inputs; i++) {
        if (!(arg = av_strtok(p, " ", &saveptr)))
            break;
        p = NULL;
        if (av_sscanf(arg, "%f", &s->weights[i]) != 1) {
            av_log(ctx, AV_LOG_ERROR, "Invalid syntax for weights[%d].\n", i);
            return AVERROR(EINVAL);
        }
        s->wfactor += s->weights[i];
        last = i;
    }

    for (; i < s->nb_inputs; i++) {
        s->weights[i] = s->weights[last];
        s->wfactor += s->weights[i];
    }
    if (s->scale == 0) {
        s->wfactor = 1 / s->wfactor;
    } else {
        s->wfactor = s->scale;
    }

    return 0;
}

typedef struct ThreadData {
    AVFrame **in, *out;
} ThreadData;
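
/* Slice-threaded worker: for every pixel in the job's share of rows, compute
 * the weighted sum over all input frames, scale it by wfactor and clip it to
 * the output range. Separate loops handle 8-bit and higher bit depths. */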
static int mix_frames(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MixContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame **in = td->in;
    AVFrame *out = td->out;
    int i, p, x, y;

    if (s->depth <= 8) {
        for (p = 0; p < s->nb_planes; p++) {
            const int slice_start = (s->height[p] * jobnr) / nb_jobs;
            const int slice_end = (s->height[p] * (jobnr+1)) / nb_jobs;
            uint8_t *dst = out->data[p] + slice_start * out->linesize[p];

            for (y = slice_start; y < slice_end; y++) {
                for (x = 0; x < s->linesize[p]; x++) {
                    int val = 0;

                    for (i = 0; i < s->nb_inputs; i++) {
                        uint8_t src = in[i]->data[p][y * in[i]->linesize[p] + x];

                        val += src * s->weights[i];
                    }

                    dst[x] = av_clip_uint8(val * s->wfactor);
                }

                dst += out->linesize[p];
            }
        }
    } else {
        for (p = 0; p < s->nb_planes; p++) {
            const int slice_start = (s->height[p] * jobnr) / nb_jobs;
            const int slice_end = (s->height[p] * (jobnr+1)) / nb_jobs;
            uint16_t *dst = (uint16_t *)(out->data[p] + slice_start * out->linesize[p]);

            for (y = slice_start; y < slice_end; y++) {
                for (x = 0; x < s->linesize[p] / 2; x++) {
                    int val = 0;

                    for (i = 0; i < s->nb_inputs; i++) {
                        uint16_t src = AV_RN16(in[i]->data[p] + y * in[i]->linesize[p] + x * 2);

                        val += src * s->weights[i];
                    }

                    dst[x] = av_clip(val * s->wfactor, 0, s->max);
                }

                dst += out->linesize[p] / 2;
            }
        }
    }

    return 0;
}
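
/* Framesync callback used by the "mix" filter: fetch the current frame from
 * each input, allocate the output frame, rescale its PTS to the output time
 * base and run mix_frames() across the available threads. */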
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = fs->opaque;
    AVFrame **in = s->frames;
    AVFrame *out;
    ThreadData td;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    td.in = in;
    td.out = out;
    ctx->internal->execute(ctx, mix_frames, &td, NULL, FFMIN(s->height[0], ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(outlink, out);
}
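
/* Check that every input matches the first input's dimensions (mix only),
 * cache per-plane line sizes and heights, and for "mix" configure framesync
 * with the EOF behaviour selected by the "duration" option. */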
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    AVRational sar = ctx->inputs[0]->sample_aspect_ratio;
    AVFilterLink *inlink = ctx->inputs[0];
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    if (!s->tmix) {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->h != height || ctx->inputs[i]->w != width) {
                av_log(ctx, AV_LOG_ERROR, "Input %d size (%dx%d) does not match input %d size (%dx%d).\n",
                       i, ctx->inputs[i]->w, ctx->inputs[i]->h, 0, width, height);
                return AVERROR(EINVAL);
            }
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;
    s->max = (1 << s->depth) - 1;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    if (s->tmix)
        return 0;

    outlink->w = width;
    outlink->h = height;
    outlink->frame_rate = frame_rate;
    outlink->sample_aspect_ratio = sar;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = (s->duration == 1 || (s->duration == 2 && i == 0)) ? EXT_STOP : EXT_INFINITY;
    }

    ret = ff_framesync_configure(&s->fs);
    outlink->time_base = s->fs.time_base;

    return ret;
}
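
/* Free framesync state, the parsed weights, the dynamically created input pad
 * names (mix) and any still-buffered frames (tmix). */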
static av_cold void uninit(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i;

    ff_framesync_uninit(&s->fs);
    av_freep(&s->weights);

    if (!s->tmix) {
        for (i = 0; i < ctx->nb_inputs; i++)
            av_freep(&ctx->input_pads[i].name);
    } else {
        for (i = 0; i < s->nb_frames && s->frames; i++)
            av_frame_free(&s->frames[i]);
    }
    av_freep(&s->frames);
}

static int activate(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

#define OFFSET(x) offsetof(MixContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

static const AVOption mix_options[] = {
    { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT16_MAX, .flags = FLAGS },
    { "weights", "set weight for each input", OFFSET(weights_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, .flags = FLAGS },
    { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT16_MAX, .flags = FLAGS },
    { "duration", "how to determine end of stream", OFFSET(duration), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, .flags = FLAGS, "duration" },
        { "longest",  "Duration of longest input",  0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "duration" },
        { "shortest", "Duration of shortest input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "duration" },
        { "first",    "Duration of first input",    0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "duration" },
    { NULL },
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

#if CONFIG_MIX_FILTER

AVFILTER_DEFINE_CLASS(mix);

AVFilter ff_vf_mix = {
    .name          = "mix",
    .description   = NULL_IF_CONFIG_SMALL("Mix video inputs."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &mix_class,
    .query_formats = query_formats,
    .outputs       = outputs,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_MIX_FILTER */

#if CONFIG_TMIX_FILTER
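
/* Input callback for the "tmix" filter: buffer frames until "frames" of them
 * are available, then keep a sliding window of the most recent ones and emit
 * their weighted mix. When the filter is disabled via the timeline, the oldest
 * buffered frame is passed through as a clone. */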
static int tmix_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;

    if (s->nb_inputs == 1)
        return ff_filter_frame(outlink, in);

    if (s->nb_frames < s->nb_inputs) {
        s->frames[s->nb_frames] = in;
        s->nb_frames++;
        if (s->nb_frames < s->nb_inputs)
            return 0;
    } else {
        av_frame_free(&s->frames[0]);
        memmove(&s->frames[0], &s->frames[1], sizeof(*s->frames) * (s->nb_inputs - 1));
        s->frames[s->nb_inputs - 1] = in;
    }

    if (ctx->is_disabled) {
        out = av_frame_clone(s->frames[0]);
        if (!out)
            return AVERROR(ENOMEM);
        return ff_filter_frame(outlink, out);
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = s->frames[0]->pts;

    td.out = out;
    td.in = s->frames;
    ctx->internal->execute(ctx, mix_frames, &td, NULL, FFMIN(s->height[0], ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(outlink, out);
}

static const AVOption tmix_options[] = {
    { "frames", "set number of successive frames to mix", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=3}, 1, 128, .flags = FLAGS },
    { "weights", "set weight for each frame", OFFSET(weights_str), AV_OPT_TYPE_STRING, {.str="1 1 1"}, 0, 0, .flags = FLAGS },
    { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT16_MAX, .flags = FLAGS },
    { NULL },
};

static const AVFilterPad inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tmix_filter_frame,
    },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tmix);

AVFilter ff_vf_tmix = {
    .name          = "tmix",
    .description   = NULL_IF_CONFIG_SMALL("Mix successive video frames."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &tmix_class,
    .query_formats = query_formats,
    .outputs       = outputs,
    .inputs        = inputs,
    .init          = init,
    .uninit        = uninit,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_TMIX_FILTER */