/*
 * Copyright (c) 2021 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * Calculate the Identity between two input videos.
 */
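/*
 * Usage sketch (assumes the usual dual-input metric invocation, with the
 * distorted stream as the first input and the reference as the second;
 * file names here are hypothetical):
 *
 *   ffmpeg -i distorted.mkv -i reference.mkv -lavfi identity -f null -
 *   ffmpeg -i distorted.mkv -i reference.mkv -lavfi msad -f null -
 *
 * Per-frame scores are exported as frame metadata; average, minimum and
 * maximum scores are logged when the streams end (see uninit() below).
 */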
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"
#include "scene_sad.h"
typedef struct IdentityContext {
    const AVClass *class;
    FFFrameSync fs;
    double score, min_score, max_score, score_comp[4];
    uint64_t nb_frames;
    int is_rgb;
    int is_msad;
    uint8_t rgba_map[4];
    int max[4];
    char comps[4];
    int nb_components;
    int nb_threads;
    int planewidth[4];
    int planeheight[4];
    uint64_t **scores;
    unsigned (*filter_line)(const uint8_t *buf, const uint8_t *ref, int w);
    int (*filter_slice)(AVFilterContext *ctx, void *arg,
                        int jobnr, int nb_jobs);
    ff_scene_sad_fn sad;
} IdentityContext;
#define OFFSET(x) offsetof(IdentityContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
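/* Count, per line, how many pixels are bit-exact between the main and the
 * reference input; one variant for 8-bit data and one for higher bit depths. */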
static unsigned identity_line_8bit(const uint8_t *main_line, const uint8_t *ref_line, int outw)
{
    unsigned score = 0;

    for (int j = 0; j < outw; j++)
        score += main_line[j] == ref_line[j];

    return score;
}

static unsigned identity_line_16bit(const uint8_t *mmain_line, const uint8_t *rref_line, int outw)
{
    const uint16_t *main_line = (const uint16_t *)mmain_line;
    const uint16_t *ref_line = (const uint16_t *)rref_line;
    unsigned score = 0;

    for (int j = 0; j < outw; j++)
        score += main_line[j] == ref_line[j];

    return score;
}
typedef struct ThreadData {
    const uint8_t *main_data[4];
    const uint8_t *ref_data[4];
    int main_linesize[4];
    int ref_linesize[4];
    int planewidth[4];
    int planeheight[4];
    uint64_t **score;
    int nb_components;
} ThreadData;
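/* Slice worker for the msad filter: sum absolute differences over this job's
 * band of rows in every plane, using the shared scene-SAD helper. */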
static int compute_images_msad(AVFilterContext *ctx, void *arg,
                               int jobnr, int nb_jobs)
{
    IdentityContext *s = ctx->priv;
    ThreadData *td = arg;
    uint64_t *score = td->score[jobnr];

    for (int c = 0; c < td->nb_components; c++) {
        const int outw = td->planewidth[c];
        const int outh = td->planeheight[c];
        const int slice_start = (outh * jobnr) / nb_jobs;
        const int slice_end = (outh * (jobnr+1)) / nb_jobs;
        const int ref_linesize = td->ref_linesize[c];
        const int main_linesize = td->main_linesize[c];
        const uint8_t *main_line = td->main_data[c] + main_linesize * slice_start;
        const uint8_t *ref_line = td->ref_data[c] + ref_linesize * slice_start;
        uint64_t m = 0;

        s->sad(main_line, main_linesize, ref_line, ref_linesize,
               outw, slice_end - slice_start, &m);
        score[c] = m;
    }

    return 0;
}
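/* Slice worker for the identity filter: count bit-exact pixels in this job's
 * band of rows, one line at a time, for every plane. */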
static int compute_images_identity(AVFilterContext *ctx, void *arg,
                                   int jobnr, int nb_jobs)
{
    IdentityContext *s = ctx->priv;
    ThreadData *td = arg;
    uint64_t *score = td->score[jobnr];

    for (int c = 0; c < td->nb_components; c++) {
        const int outw = td->planewidth[c];
        const int outh = td->planeheight[c];
        const int slice_start = (outh * jobnr) / nb_jobs;
        const int slice_end = (outh * (jobnr+1)) / nb_jobs;
        const int ref_linesize = td->ref_linesize[c];
        const int main_linesize = td->main_linesize[c];
        const uint8_t *main_line = td->main_data[c] + main_linesize * slice_start;
        const uint8_t *ref_line = td->ref_data[c] + ref_linesize * slice_start;
        uint64_t m = 0;

        for (int i = slice_start; i < slice_end; i++) {
            m += s->filter_line(main_line, ref_line, outw);
            ref_line += ref_linesize;
            main_line += main_linesize;
        }
        score[c] = m;
    }

    return 0;
}
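/* Export a score as frame metadata under "lavfi.<filter>.<filter><key>[<comp>]",
 * e.g. "lavfi.identity.identity.Y" for a component or "lavfi.msad.msad_avg"
 * for the per-frame average. */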
static void set_meta(AVFilterContext *ctx,
                     AVDictionary **metadata, const char *key, char comp, float d)
{
    char value[128];
    snprintf(value, sizeof(value), "%f", d);
    if (comp) {
        char key2[128];
        snprintf(key2, sizeof(key2), "lavfi.%s.%s%s%c",
                 ctx->filter->name, ctx->filter->name, key, comp);
        av_dict_set(metadata, key2, value, 0);
    } else {
        char key2[128];
        snprintf(key2, sizeof(key2), "lavfi.%s.%s%s",
                 ctx->filter->name, ctx->filter->name, key);
        av_dict_set(metadata, key2, value, 0);
    }
}
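/* Framesync callback: run the per-slice worker on a matched frame pair,
 * reduce the per-thread, per-plane sums into per-component scores, average
 * the components, update the running min/max/total, and attach the scores as
 * metadata to the main frame before passing it downstream. */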
static int do_identity(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    IdentityContext *s = ctx->priv;
    AVFrame *master, *ref;
    double comp_score[4], score = 0.;
    uint64_t comp_sum[4] = { 0 };
    AVDictionary **metadata;
    ThreadData td;
    int ret;

    ret = ff_framesync_dualinput_get(fs, &master, &ref);
    if (ret < 0)
        return ret;
    if (ctx->is_disabled || !ref)
        return ff_filter_frame(ctx->outputs[0], master);
    metadata = &master->metadata;

    td.nb_components = s->nb_components;
    td.score = s->scores;
    for (int c = 0; c < s->nb_components; c++) {
        td.main_data[c] = master->data[c];
        td.ref_data[c] = ref->data[c];
        td.main_linesize[c] = master->linesize[c];
        td.ref_linesize[c] = ref->linesize[c];
        td.planewidth[c] = s->planewidth[c];
        td.planeheight[c] = s->planeheight[c];
    }

    ctx->internal->execute(ctx, s->filter_slice, &td, NULL, FFMIN(s->planeheight[1], s->nb_threads));

    for (int j = 0; j < s->nb_threads; j++) {
        for (int c = 0; c < s->nb_components; c++)
            comp_sum[c] += s->scores[j][c];
    }

    for (int c = 0; c < s->nb_components; c++)
        comp_score[c] = comp_sum[c] / ((double)s->planewidth[c] * s->planeheight[c]);

    for (int c = 0; c < s->nb_components && s->is_msad; c++)
        comp_score[c] /= (double)s->max[c];

    for (int c = 0; c < s->nb_components; c++)
        score += comp_score[c];
    score /= s->nb_components;

    s->min_score = FFMIN(s->min_score, score);
    s->max_score = FFMAX(s->max_score, score);

    s->score += score;

    for (int j = 0; j < s->nb_components; j++)
        s->score_comp[j] += comp_score[j];
    s->nb_frames++;

    for (int j = 0; j < s->nb_components; j++) {
        int c = s->is_rgb ? s->rgba_map[j] : j;
        set_meta(ctx, metadata, ".", s->comps[j], comp_score[c]);
    }
    set_meta(ctx, metadata, "_avg", 0, score);

    return ff_filter_frame(ctx->outputs[0], master);
}
static av_cold int init(AVFilterContext *ctx)
{
    IdentityContext *s = ctx->priv;

    s->fs.on_event = do_identity;

    return 0;
}
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
#define PF_ALPHA(suf)   AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
#define PF(suf)         PF_NOALPHA(suf), PF_ALPHA(suf)
        PF(P), PF(P9), PF(P10), PF_NOALPHA(P12), PF_NOALPHA(P14), PF(P16),
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
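/* Configure the reference input: validate that both inputs share dimensions
 * and pixel format, precompute plane sizes and per-component maxima, allocate
 * the per-thread score buffers, and pick the 8-/16-bit line function, the
 * slice worker and the SAD implementation for the negotiated bit depth. */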
static int config_input_ref(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx = inlink->dst;
    IdentityContext *s = ctx->priv;

    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->nb_components = desc->nb_components;
    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
    s->comps[0] = s->is_rgb ? 'R' : 'Y';
    s->comps[1] = s->is_rgb ? 'G' : 'U';
    s->comps[2] = s->is_rgb ? 'B' : 'V';
    s->comps[3] = 'A';

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->scores = av_calloc(s->nb_threads, sizeof(*s->scores));
    if (!s->scores)
        return AVERROR(ENOMEM);

    for (int t = 0; t < s->nb_threads && s->scores; t++) {
        s->scores[t] = av_calloc(s->nb_components, sizeof(*s->scores[0]));
        if (!s->scores[t])
            return AVERROR(ENOMEM);
    }

    s->min_score = +INFINITY;
    s->max_score = -INFINITY;

    s->max[0] = (1 << desc->comp[0].depth) - 1;
    s->max[1] = (1 << desc->comp[1].depth) - 1;
    s->max[2] = (1 << desc->comp[2].depth) - 1;
    s->max[3] = (1 << desc->comp[3].depth) - 1;

    s->is_msad = !strcmp(ctx->filter->name, "msad");
    s->filter_slice = !s->is_msad ? compute_images_identity : compute_images_msad;
    s->filter_line = desc->comp[0].depth > 8 ? identity_line_16bit : identity_line_8bit;

    s->sad = ff_scene_sad_get_fn(desc->comp[0].depth <= 8 ? 8 : 16);
    if (!s->sad)
        return AVERROR(EINVAL);

    return 0;
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    IdentityContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret;

    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;
    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;
    outlink->time_base = s->fs.time_base;

    if (av_cmp_q(mainlink->time_base, outlink->time_base) ||
        av_cmp_q(ctx->inputs[1]->time_base, outlink->time_base))
        av_log(ctx, AV_LOG_WARNING, "not matching timebases found between first input: %d/%d and second input %d/%d, results may be incorrect!\n",
               mainlink->time_base.num, mainlink->time_base.den,
               ctx->inputs[1]->time_base.num, ctx->inputs[1]->time_base.den);

    return 0;
}
static int activate(AVFilterContext *ctx)
{
    IdentityContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}
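/* On teardown, log the per-component and overall average, minimum and maximum
 * scores accumulated over all processed frames, then free the framesync state
 * and the per-thread score buffers. */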
static av_cold void uninit(AVFilterContext *ctx)
{
    IdentityContext *s = ctx->priv;

    if (s->nb_frames > 0) {
        char buf[256];

        buf[0] = 0;
        for (int j = 0; j < s->nb_components; j++) {
            int c = s->is_rgb ? s->rgba_map[j] : j;
            av_strlcatf(buf, sizeof(buf), " %c:%f", s->comps[j], s->score_comp[c] / s->nb_frames);
        }

        av_log(ctx, AV_LOG_INFO, "%s%s average:%f min:%f max:%f\n",
               ctx->filter->name,
               buf,
               s->score / s->nb_frames,
               s->min_score,
               s->max_score);
    }

    ff_framesync_uninit(&s->fs);
    for (int t = 0; t < s->nb_threads && s->scores; t++)
        av_freep(&s->scores[t]);
    av_freep(&s->scores);
}
static const AVFilterPad identity_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
    },{
        .name         = "reference",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
    { NULL }
};

static const AVFilterPad identity_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

static const AVOption options[] = {
    { NULL }
};
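/* Both filters share IdentityContext, the pads and the (empty) options table;
 * only the name, description and generated AVClass differ between them. */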
#if CONFIG_IDENTITY_FILTER

#define identity_options options
FRAMESYNC_DEFINE_CLASS(identity, IdentityContext, fs);

AVFilter ff_vf_identity = {
    .name          = "identity",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the Identity between two video streams."),
    .preinit       = identity_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(IdentityContext),
    .priv_class    = &identity_class,
    .inputs        = identity_inputs,
    .outputs       = identity_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_IDENTITY_FILTER */
#if CONFIG_MSAD_FILTER

#define msad_options options
FRAMESYNC_DEFINE_CLASS(msad, IdentityContext, fs);

AVFilter ff_vf_msad = {
    .name          = "msad",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the MSAD between two video streams."),
    .preinit       = msad_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(IdentityContext),
    .priv_class    = &msad_class,
    .inputs        = identity_inputs,
    .outputs       = identity_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_MSAD_FILTER */