You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

370 lines
13KB

  1. /*
  2. * Copyright (c) 2017 Ronald S. Bultje <rsbultje@gmail.com>
  3. * Copyright (c) 2017 Ashish Pratap Singh <ashk43712@gmail.com>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Calculate the VMAF between two input videos.
  24. */
  25. #include <pthread.h>
  26. #include <libvmaf.h>
  27. #include "libavutil/avstring.h"
  28. #include "libavutil/opt.h"
  29. #include "libavutil/pixdesc.h"
  30. #include "avfilter.h"
  31. #include "drawutils.h"
  32. #include "formats.h"
  33. #include "framesync.h"
  34. #include "internal.h"
  35. #include "video.h"
/**
 * Private state for the libvmaf filter.
 *
 * A dedicated worker thread runs libvmaf's compute_vmaf(), which pulls
 * frame pairs through the read_frame callback. The filter thread and the
 * worker hand frames over via gref/gmain under lock/cond.
 */
typedef struct LIBVMAFContext {
    const AVClass *class;
    FFFrameSync fs;                    // synchronizes the two inputs pair-by-pair
    const AVPixFmtDescriptor *desc;    // pixel format of both inputs (must match)
    int width;
    int height;
    double vmaf_score;                 // pooled score, written by the vmaf thread
    int vmaf_thread_created;           // nonzero once vmaf_thread must be joined
    pthread_t vmaf_thread;
    pthread_mutex_t lock;              // guards gref/gmain/frame_set/eof/error
    pthread_cond_t cond;               // signaled when frame_set or eof changes
    int eof;                           // set by uninit() to release the worker
    AVFrame *gmain;                    // pending distorted frame for the worker
    AVFrame *gref;                     // pending reference frame for the worker
    int frame_set;                     // nonzero while gref/gmain hold a pair
    char *model_path;                  // "model_path" option
    char *log_path;                    // "log_path" option
    char *log_fmt;                     // "log_fmt" option
    int disable_clip;                  // NOTE(review): no matching AVOption below
    int disable_avx;                   // NOTE(review): no matching AVOption below
    int enable_transform;
    int phone_model;
    int psnr;
    int ssim;
    int ms_ssim;
    char *pool;
    int error;                         // nonzero return of compute_vmaf()
} LIBVMAFContext;
#define OFFSET(x) offsetof(LIBVMAFContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-visible options; each maps onto a LIBVMAFContext field via OFFSET. */
static const AVOption libvmaf_options[] = {
    {"model_path", "Set the model to be used for computing vmaf.", OFFSET(model_path), AV_OPT_TYPE_STRING, {.str="/usr/local/share/model/vmaf_v0.6.1.pkl"}, 0, 1, FLAGS},
    {"log_path", "Set the file path to be used to store logs.", OFFSET(log_path), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"log_fmt", "Set the format of the log (xml or json).", OFFSET(log_fmt), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    {"enable_transform", "Enables transform for computing vmaf.", OFFSET(enable_transform), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"phone_model", "Invokes the phone model that will generate higher VMAF scores.", OFFSET(phone_model), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"psnr", "Enables computing psnr along with vmaf.", OFFSET(psnr), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ssim", "Enables computing ssim along with vmaf.", OFFSET(ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"ms_ssim", "Enables computing ms-ssim along with vmaf.", OFFSET(ms_ssim), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"pool", "Set the pool method to be used for computing vmaf.", OFFSET(pool), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, FLAGS},
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(libvmaf, LIBVMAFContext, fs);
/*
 * Template for the per-bit-depth frame callbacks handed to compute_vmaf().
 * Runs on the libvmaf worker thread: it blocks until do_vmaf() publishes a
 * frame pair (frame_set) or uninit() signals end of stream (eof), copies the
 * luma plane of both frames into libvmaf's float buffers, then releases the
 * pair and wakes the filter thread.  Returns 0 when a pair was delivered and
 * 2 on EOF (libvmaf's "no more frames" code).
 * NOTE(review): 10-bit samples are converted with a plain cast, with no
 * renormalization — presumably libvmaf expects raw code values for the
 * declared format; confirm against the libvmaf API.
 */
#define read_frame_fn(type, bits) \
static int read_frame_##bits##bit(float *ref_data, float *main_data, \
                                  float *temp_data, int stride, void *ctx) \
{ \
    LIBVMAFContext *s = (LIBVMAFContext *) ctx; \
    int ret; \
    \
    pthread_mutex_lock(&s->lock); \
    \
    /* wait for a published frame pair, or for EOF from uninit() */ \
    while (!s->frame_set && !s->eof) { \
        pthread_cond_wait(&s->cond, &s->lock); \
    } \
    \
    if (s->frame_set) { \
        int ref_stride = s->gref->linesize[0]; \
        int main_stride = s->gmain->linesize[0]; \
        \
        const type *ref_ptr = (const type *) s->gref->data[0]; \
        const type *main_ptr = (const type *) s->gmain->data[0]; \
        \
        float *ptr = ref_data; \
        \
        int h = s->height; \
        int w = s->width; \
        \
        int i,j; \
        \
        /* copy the reference luma plane, row by row (strides are in bytes) */ \
        for (i = 0; i < h; i++) { \
            for ( j = 0; j < w; j++) { \
                ptr[j] = (float)ref_ptr[j]; \
            } \
            ref_ptr += ref_stride / sizeof(*ref_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
        \
        ptr = main_data; \
        \
        /* copy the distorted ("main") luma plane the same way */ \
        for (i = 0; i < h; i++) { \
            for (j = 0; j < w; j++) { \
                ptr[j] = (float)main_ptr[j]; \
            } \
            main_ptr += main_stride / sizeof(*main_ptr); \
            ptr += stride / sizeof(*ptr); \
        } \
    } \
    \
    /* nonzero means we woke on EOF with no frame: tell libvmaf to stop */ \
    ret = !s->frame_set; \
    \
    av_frame_unref(s->gref); \
    av_frame_unref(s->gmain); \
    s->frame_set = 0; \
    \
    /* let do_vmaf() publish the next pair */ \
    pthread_cond_signal(&s->cond); \
    pthread_mutex_unlock(&s->lock); \
    \
    if (ret) { \
        return 2; \
    } \
    \
    return 0; \
}

read_frame_fn(uint8_t, 8);
read_frame_fn(uint16_t, 10);
  142. static void compute_vmaf_score(LIBVMAFContext *s)
  143. {
  144. int (*read_frame)(float *ref_data, float *main_data, float *temp_data,
  145. int stride, void *ctx);
  146. char *format;
  147. if (s->desc->comp[0].depth <= 8) {
  148. read_frame = read_frame_8bit;
  149. } else {
  150. read_frame = read_frame_10bit;
  151. }
  152. format = (char *) s->desc->name;
  153. s->error = compute_vmaf(&s->vmaf_score, format, s->width, s->height,
  154. read_frame, s, s->model_path, s->log_path,
  155. s->log_fmt, 0, 0, s->enable_transform,
  156. s->phone_model, s->psnr, s->ssim,
  157. s->ms_ssim, s->pool);
  158. }
  159. static void *call_vmaf(void *ctx)
  160. {
  161. LIBVMAFContext *s = (LIBVMAFContext *) ctx;
  162. compute_vmaf_score(s);
  163. if (!s->error) {
  164. av_log(ctx, AV_LOG_INFO, "VMAF score: %f\n",s->vmaf_score);
  165. } else {
  166. pthread_mutex_lock(&s->lock);
  167. pthread_cond_signal(&s->cond);
  168. pthread_mutex_unlock(&s->lock);
  169. }
  170. pthread_exit(NULL);
  171. return NULL;
  172. }
  173. static int do_vmaf(FFFrameSync *fs)
  174. {
  175. AVFilterContext *ctx = fs->parent;
  176. LIBVMAFContext *s = ctx->priv;
  177. AVFrame *master, *ref;
  178. int ret;
  179. ret = ff_framesync_dualinput_get(fs, &master, &ref);
  180. if (ret < 0)
  181. return ret;
  182. if (!ref)
  183. return ff_filter_frame(ctx->outputs[0], master);
  184. pthread_mutex_lock(&s->lock);
  185. while (s->frame_set && !s->error) {
  186. pthread_cond_wait(&s->cond, &s->lock);
  187. }
  188. if (s->error) {
  189. av_log(ctx, AV_LOG_ERROR,
  190. "libvmaf encountered an error, check log for details\n");
  191. pthread_mutex_unlock(&s->lock);
  192. return AVERROR(EINVAL);
  193. }
  194. av_frame_ref(s->gref, ref);
  195. av_frame_ref(s->gmain, master);
  196. s->frame_set = 1;
  197. pthread_cond_signal(&s->cond);
  198. pthread_mutex_unlock(&s->lock);
  199. return ff_filter_frame(ctx->outputs[0], master);
  200. }
  201. static av_cold int init(AVFilterContext *ctx)
  202. {
  203. LIBVMAFContext *s = ctx->priv;
  204. s->gref = av_frame_alloc();
  205. s->gmain = av_frame_alloc();
  206. s->error = 0;
  207. s->vmaf_thread_created = 0;
  208. pthread_mutex_init(&s->lock, NULL);
  209. pthread_cond_init (&s->cond, NULL);
  210. s->fs.on_event = do_vmaf;
  211. return 0;
  212. }
  213. static int query_formats(AVFilterContext *ctx)
  214. {
  215. static const enum AVPixelFormat pix_fmts[] = {
  216. AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
  217. AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE,
  218. AV_PIX_FMT_NONE
  219. };
  220. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  221. if (!fmts_list)
  222. return AVERROR(ENOMEM);
  223. return ff_set_common_formats(ctx, fmts_list);
  224. }
  225. static int config_input_ref(AVFilterLink *inlink)
  226. {
  227. AVFilterContext *ctx = inlink->dst;
  228. LIBVMAFContext *s = ctx->priv;
  229. int th;
  230. if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
  231. ctx->inputs[0]->h != ctx->inputs[1]->h) {
  232. av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
  233. return AVERROR(EINVAL);
  234. }
  235. if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
  236. av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
  237. return AVERROR(EINVAL);
  238. }
  239. s->desc = av_pix_fmt_desc_get(inlink->format);
  240. s->width = ctx->inputs[0]->w;
  241. s->height = ctx->inputs[0]->h;
  242. th = pthread_create(&s->vmaf_thread, NULL, call_vmaf, (void *) s);
  243. if (th) {
  244. av_log(ctx, AV_LOG_ERROR, "Thread creation failed.\n");
  245. return AVERROR(EINVAL);
  246. }
  247. s->vmaf_thread_created = 1;
  248. return 0;
  249. }
  250. static int config_output(AVFilterLink *outlink)
  251. {
  252. AVFilterContext *ctx = outlink->src;
  253. LIBVMAFContext *s = ctx->priv;
  254. AVFilterLink *mainlink = ctx->inputs[0];
  255. int ret;
  256. ret = ff_framesync_init_dualinput(&s->fs, ctx);
  257. if (ret < 0)
  258. return ret;
  259. outlink->w = mainlink->w;
  260. outlink->h = mainlink->h;
  261. outlink->time_base = mainlink->time_base;
  262. outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
  263. outlink->frame_rate = mainlink->frame_rate;
  264. if ((ret = ff_framesync_configure(&s->fs)) < 0)
  265. return ret;
  266. return 0;
  267. }
  268. static int activate(AVFilterContext *ctx)
  269. {
  270. LIBVMAFContext *s = ctx->priv;
  271. return ff_framesync_activate(&s->fs);
  272. }
/**
 * Filter teardown.  Order matters: first raise the eof flag and signal the
 * condition variable so the worker's read_frame callback stops waiting,
 * then join the worker, and only after that free the shared frames and
 * destroy the synchronization primitives the worker was using.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    LIBVMAFContext *s = ctx->priv;

    ff_framesync_uninit(&s->fs);

    /* tell the worker there will be no more frames */
    pthread_mutex_lock(&s->lock);
    s->eof = 1;
    pthread_cond_signal(&s->cond);
    pthread_mutex_unlock(&s->lock);

    if (s->vmaf_thread_created)
    {
        pthread_join(s->vmaf_thread, NULL);
        s->vmaf_thread_created = 0;
    }

    av_frame_free(&s->gref);
    av_frame_free(&s->gmain);

    pthread_mutex_destroy(&s->lock);
    pthread_cond_destroy(&s->cond);
}
/* Two inputs: the distorted ("main") stream and the pristine reference.
 * The size/format checks and worker-thread start hang off the second pad. */
static const AVFilterPad libvmaf_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
    },{
        .name         = "reference",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_ref,
    },
    { NULL }
};
/* Single output carrying the main input through unchanged. */
static const AVFilterPad libvmaf_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};
/* Filter registration entry for lavfi's filter table. */
AVFilter ff_vf_libvmaf = {
    .name          = "libvmaf",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the VMAF between two video streams."),
    .preinit       = libvmaf_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(LIBVMAFContext),
    .priv_class    = &libvmaf_class,
    .inputs        = libvmaf_inputs,
    .outputs       = libvmaf_outputs,
};