You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

381 lines
14KB

  1. /*
  2. * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
  3. * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Calculate the VMAF between two input videos.
  24. */
  25. #include <pthread.h>
  26. #include <libvmaf.h>
  27. #include "libavutil/avstring.h"
  28. #include "libavutil/opt.h"
  29. #include "libavutil/pixdesc.h"
  30. #include "avfilter.h"
  31. #include "drawutils.h"
  32. #include "formats.h"
  33. #include "framesync.h"
  34. #include "internal.h"
  35. #include "video.h"
/**
 * Filter private context.  One libvmaf worker thread pulls frame pairs
 * that do_vmaf() publishes through gmain/gref under @c lock.
 */
typedef struct LIBVMAFContext {
    const AVClass *class;           /* must stay first: used as av_log() context */
    FFFrameSync fs;                 /* synchronizes the two input streams */
    const AVPixFmtDescriptor *desc; /* descriptor of the (common) input pixel format */
    int width;                      /* input width; both inputs must match */
    int height;                     /* input height; both inputs must match */
    double vmaf_score;              /* overall score written by compute_vmaf() */
    int vmaf_thread_created;        /* nonzero once vmaf_thread must be joined */
    pthread_t vmaf_thread;          /* worker thread running compute_vmaf() */
    pthread_mutex_t lock;           /* guards gmain/gref/frame_set/eof/error */
    pthread_cond_t cond;            /* producer/consumer handshake on frame_set/eof */
    int eof;                        /* set in uninit() to release the worker */
    AVFrame *gmain;                 /* pending distorted ("main") frame */
    AVFrame *gref;                  /* pending reference frame */
    int frame_set;                  /* 1 while gmain/gref hold an unconsumed pair */
    char *model_path;               /* option: path to the VMAF model file */
    char *log_path;                 /* option: where to write the log, or NULL */
    char *log_fmt;                  /* option: log format string, or NULL */
    int disable_clip;               /* not exposed in libvmaf_options below */
    int disable_avx;                /* not exposed in libvmaf_options below */
    int enable_transform;           /* option: apply score transform */
    int phone_model;                /* option: use the phone model */
    int psnr;                       /* option: also compute PSNR */
    int ssim;                       /* option: also compute SSIM */
    int ms_ssim;                    /* option: also compute MS-SSIM */
    char *pool;                     /* option: pooling method name, or NULL */
    int n_threads;                  /* option: libvmaf thread count (0 = default) */
    int n_subsample;                /* option: frame subsampling interval */
    int enable_conf_interval;       /* option: compute confidence interval */
    int error;                      /* nonzero if compute_vmaf() failed */
} LIBVMAFContext;
#define OFFSET(x) offsetof(LIBVMAFContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-visible options; each maps onto the LIBVMAFContext field of the
 * same name and is forwarded verbatim to compute_vmaf(). */
static const AVOption libvmaf_options[] = {
    {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
    {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"log_fmt", "Set the format of the log (csv, json or xml).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"n_threads", "Set number of threads to be used when computing vmaf.", OFFSET(n_threads), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT_MAX, FLAGS},
    {"n_subsample", "Set interval for frame subsampling used when computing vmaf.", OFFSET(n_subsample), AV_OPT_TYPE_INT, {.i64=1}, 1, UINT_MAX, FLAGS},
    {"enable_conf_interval", "Enables confidence interval.", OFFSET(enable_conf_interval), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);
/*
 * Expand to a read_frame callback for compute_vmaf().  libvmaf invokes the
 * callback repeatedly from the worker thread to fetch the next frame pair:
 * it blocks on the condition variable until do_vmaf() publishes a pair
 * (frame_set) or the filter signals end of stream (eof), then converts the
 * luma plane of both frames to float scaled to the 8-bit range.  Returns 0
 * when a pair was delivered and 2 when no more frames are available
 * (presumably libvmaf's end-of-stream code — confirm against libvmaf docs).
 * Only block comments are used below: a line comment would comment out the
 * macro continuation backslash.
 */
#define read_frame_fn(type, bits) \
static int read_frame_##bits##bit(float *ref_data, float *main_data, \
                                  float *temp_data, int stride, void *ctx) \
{ \
    LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
    int ret; \
    \
    pthread_mutex_lock(&s->lock); \
    \
    /* wait until a frame pair is queued or the stream has ended */ \
    while (!s->frame_set && !s->eof) { \
        pthread_cond_wait(&s->cond, &s->lock); \
    } \
    \
    if (s->frame_set) { \
        int ref_stride = s->gref->linesize[0]; \
        int main_stride = s->gmain->linesize[0]; \
        \
        const type *ref_ptr = (const type *) s->gref->data[0]; \
        const type *main_ptr = (const type *) s->gmain->data[0]; \
        \
        float *ptr = ref_data; \
        /* scale >8-bit samples down to the 0..255 range */ \
        float factor = 1.f / (1 << (bits - 8)); \
        \
        int h = s->height; \
        int w = s->width; \
        \
        int i,j; \
        \
        for (i = 0; i < h; i++) { \
            for ( j = 0; j < w; j++) { \
                ptr[j] = ref_ptr[j] * factor; \
            } \
            /* linesize is in bytes; step in elements */ \
            ref_ptr += ref_stride / sizeof(*ref_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
        \
        ptr = main_data; \
        \
        for (i = 0; i < h; i++) { \
            for (j = 0; j < w; j++) { \
                ptr[j] = main_ptr[j] * factor; \
            } \
            main_ptr += main_stride / sizeof(*main_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
    } \
    \
    /* nonzero means we woke up on eof with nothing queued */ \
    ret = !s->frame_set; \
    \
    av_frame_unref(s->gref); \
    av_frame_unref(s->gmain); \
    s->frame_set = 0; \
    \
    /* let do_vmaf() queue the next pair */ \
    pthread_cond_signal(&s->cond); \
    pthread_mutex_unlock(&s->lock); \
    \
    if (ret) { \
        return 2; \
    } \
    \
    return 0; \
}

read_frame_fn(uint8_t, 8);
read_frame_fn(uint16_t, 10);
  149. static void compute_vmaf_score(LIBVMAFContext *s)
  150. {
  151. int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
  152. int stride, void *ctx);
  153. char *format;
  154. if (s->desc->comp[0].depth <= 8) {
  155. read_frame = read_frame_8bit;
  156. } else {
  157. read_frame = read_frame_10bit;
  158. }
  159. format = (char *) s->desc->name;
  160. s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height,
  161. read_frame, s, s->model_path, s->log_path,
  162. s->log_fmt, 0, 0, s->enable_transform,
  163. s->phone_model, s->psnr, s->ssim,
  164. s->ms_ssim, s->pool,
  165. s->n_threads, s->n_subsample, s->enable_conf_interval);
  166. }
/*
 * Worker thread entry point: runs the blocking libvmaf computation.
 * On success the overall score is logged; on failure the filter thread
 * (possibly blocked in do_vmaf()) is woken so it can report the error.
 * Note: ctx is the LIBVMAFContext, whose first member is an AVClass
 * pointer, which makes it a valid av_log() context.
 */
static void *call_vmaf(void *ctx)
{
    LIBVMAFContext *s = (LIBVMAFContext *) ctx;
    compute_vmaf_score(s);
    if (!s->error) {
        av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score);
    } else {
        /* wake do_vmaf(), which re-checks s->error under the lock */
        pthread_mutex_lock(&s->lock);
        pthread_cond_signal(&s->cond);
        pthread_mutex_unlock(&s->lock);
    }
    pthread_exit(NULL);
    return NULL; /* not reached; silences missing-return warnings */
}
  181. static int do_vmaf(FFFrameSync *fs)
  182. {
  183. AVFilterContext *ctx = fs->parent;
  184. LIBVMAFContext *s = ctx->priv;
  185. AVFrame *master, *ref;
  186. int ret;
  187. ret = ff_framesync_dualinput_get(fs, &master, &ref);
  188. if (ret < 0)
  189. return ret;
  190. if (!ref)
  191. return ff_filter_frame(ctx->outputs[0], master);
  192. pthread_mutex_lock(&s->lock);
  193. while (s->frame_set && !s->error) {
  194. pthread_cond_wait(&s->cond, &s->lock);
  195. }
  196. if (s->error) {
  197. av_log(ctx, AV_LOG_ERROR,
  198. "libvmaf encountered an error, check log for details\n");
  199. pthread_mutex_unlock(&s->lock);
  200. return AVERROR(EINVAL);
  201. }
  202. av_frame_ref(s->gref, ref);
  203. av_frame_ref(s->gmain, master);
  204. s->frame_set = 1;
  205. pthread_cond_signal(&s->cond);
  206. pthread_mutex_unlock(&s->lock);
  207. return ff_filter_frame(ctx->outputs[0], master);
  208. }
  209. static av_cold int init(AVFilterContext *ctx)
  210. {
  211. LIBVMAFContext *s = ctx->priv;
  212. s->gref = av_frame_alloc();
  213. s->gmain = av_frame_alloc();
  214. if (!s->gref || !s->gmain)
  215. return AVERROR(ENOMEM);
  216. s->error = 0;
  217. s->vmaf_thread_created = 0;
  218. pthread_mutex_init(&s->lock, NULL);
  219. pthread_cond_init (&s->cond, NULL);
  220. s->fs.on_event = do_vmaf;
  221. return 0;
  222. }
  223. static int query_formats(AVFilterContext *ctx)
  224. {
  225. static const enum AVPixelFormat pix_fmts[] = {
  226. AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
  227. AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE,
  228. AV_PIX_FMT_NONE
  229. };
  230. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  231. if (!fmts_list)
  232. return AVERROR(ENOMEM);
  233. return ff_set_common_formats(ctx, fmts_list);
  234. }
  235. static int config_input_ref(AVFilterLink *inlink)
  236. {
  237. AVFilterContext *ctx = inlink->dst;
  238. LIBVMAFContext *s = ctx->priv;
  239. int th;
  240. if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
  241. ctx->inputs[0]->h != ctx->inputs[1]->h) {
  242. av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
  243. return AVERROR(EINVAL);
  244. }
  245. if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
  246. av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
  247. return AVERROR(EINVAL);
  248. }
  249. s->desc = av_pix_fmt_desc_get(inlink->format);
  250. s->width = ctx->inputs[0]->w;
  251. s->height = ctx->inputs[0]->h;
  252. th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
  253. if (th) {
  254. av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
  255. return AVERROR(EINVAL);
  256. }
  257. s->vmaf_thread_created = 1;
  258. return 0;
  259. }
  260. static int config_output(AVFilterLink *outlink)
  261. {
  262. AVFilterContext *ctx = outlink->src;
  263. LIBVMAFContext *s = ctx->priv;
  264. AVFilterLink *mainlink = ctx->inputs[0];
  265. int ret;
  266. ret = ff_framesync_init_dualinput(&s->fs, ctx);
  267. if (ret < 0)
  268. return ret;
  269. outlink->w = mainlink->w;
  270. outlink->h = mainlink->h;
  271. outlink->time_base = mainlink->time_base;
  272. outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
  273. outlink->frame_rate = mainlink->frame_rate;
  274. if ((ret = ff_framesync_configure(&s->fs)) < 0)
  275. return ret;
  276. return 0;
  277. }
  278. static int activate(AVFilterContext *ctx)
  279. {
  280. LIBVMAFContext *s = ctx->priv;
  281. return ff_framesync_activate(&s->fs);
  282. }
/*
 * Tear down the filter.  Teardown order is load-bearing: signal eof so a
 * read_frame callback blocked in pthread_cond_wait() returns, join the
 * worker, and only then free the shared frames and destroy the
 * primitives the worker was using.
 * NOTE(review): if init() returned early (allocation failure) before
 * initializing lock/cond, locking here is undefined behavior — verify
 * the initialization order in init().
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;
    ff_framesync_uninit(&s->fs);
    /* release a worker blocked waiting for the next frame pair */
    pthread_mutex_lock(&s->lock);
    s->eof = 1;
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->lock);
    if (s->vmaf_thread_created)
    {
        pthread_join(s->vmaf_thread, NULL);
        s->vmaf_thread_created = 0;
    }
    /* av_frame_free() tolerates NULL, so this is safe after failed init */
    av_frame_free(&s->gref);
    av_frame_free(&s->gmain);
    pthread_mutex_destroy(&s->lock);
    pthread_cond_destroy(&s->cond);
}
/* Input pads: pad 0 is the distorted ("main") stream, pad 1 the pristine
 * reference.  The reference pad's config callback validates both inputs
 * and starts the worker thread. */
static const AVFilterPad libvmaf_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_VIDEO,
    },{
        .name = "reference",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
    { NULL }
};
/* Single video output: passes the main input through unchanged. */
static const AVFilterPad libvmaf_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};
/* Filter registration: two synchronized video inputs, one output. */
AVFilter ff_vf_libvmaf = {
    .name = "libvmaf",
    .description = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
    .preinit = libvmaf_framesync_preinit,
    .init = init,
    .uninit = uninit,
    .query_formats = query_formats,
    .activate = activate,
    .priv_size = sizeof(LIBVMAFContext),
    .priv_class = &libvmaf_class,
    .inputs = libvmaf_inputs,
    .outputs = libvmaf_outputs,
};