You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

414 lines
14KB

  1. /*
  2. * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
  3. * Copyright (c) 2010 Baptiste Coudurier
  4. * Copyright (c) 2012 Loren Merritt
  5. *
  6. * This file is part of FFmpeg, ported from MPlayer.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License along
  19. * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  20. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  21. */
  22. /**
  23. * @file
  24. * high quality 3d video denoiser, ported from MPlayer
  25. * libmpcodecs/vf_hqdn3d.c.
  26. */
  27. #include <float.h>
  28. #include "config.h"
  29. #include "libavutil/attributes.h"
  30. #include "libavutil/common.h"
  31. #include "libavutil/pixdesc.h"
  32. #include "libavutil/intreadwrite.h"
  33. #include "libavutil/opt.h"
  34. #include "avfilter.h"
  35. #include "formats.h"
  36. #include "internal.h"
  37. #include "video.h"
  38. #include "vf_hqdn3d.h"
  39. #define LUT_BITS (depth==16 ? 8 : 4)
  40. #define LOAD(x) (((depth == 8 ? src[x] : AV_RN16A(src + (x) * 2)) << (16 - depth))\
  41. + (((1 << (16 - depth)) - 1) >> 1))
  42. #define STORE(x,val) (depth == 8 ? dst[x] = (val) >> (16 - depth) : \
  43. AV_WN16A(dst + (x) * 2, (val) >> (16 - depth)))
  44. av_always_inline
  45. static uint32_t lowpass(int prev, int cur, int16_t *coef, int depth)
  46. {
  47. int d = (prev - cur) >> (8 - LUT_BITS);
  48. return cur + coef[d];
  49. }
  50. av_always_inline
  51. static void denoise_temporal(uint8_t *src, uint8_t *dst,
  52. uint16_t *frame_ant,
  53. int w, int h, int sstride, int dstride,
  54. int16_t *temporal, int depth)
  55. {
  56. long x, y;
  57. uint32_t tmp;
  58. temporal += 256 << LUT_BITS;
  59. for (y = 0; y < h; y++) {
  60. for (x = 0; x < w; x++) {
  61. frame_ant[x] = tmp = lowpass(frame_ant[x], LOAD(x), temporal, depth);
  62. STORE(x, tmp);
  63. }
  64. src += sstride;
  65. dst += dstride;
  66. frame_ant += w;
  67. }
  68. }
/**
 * Combined spatial + temporal denoise pass.
 *
 * Runs a causal low-pass horizontally (left neighbor), vertically (previous
 * smoothed row kept in line_ant), and temporally (previous filtered frame
 * kept in frame_ant).  line_ant and frame_ant are updated in place.
 * An arch-optimized per-row kernel (s->denoise_row[depth]) is used for rows
 * after the first when available.
 */
av_always_inline
static void denoise_spatial(HQDN3DContext *s,
                            uint8_t *src, uint8_t *dst,
                            uint16_t *line_ant, uint16_t *frame_ant,
                            int w, int h, int sstride, int dstride,
                            int16_t *spatial, int16_t *temporal, int depth)
{
    long x, y;
    uint32_t pixel_ant;
    uint32_t tmp;

    /* Re-centre both LUTs so they can be indexed with signed differences. */
    spatial  += 256 << LUT_BITS;
    temporal += 256 << LUT_BITS;

    /* First line has no top neighbor. Only left one for each tmp and
     * last frame */
    pixel_ant = LOAD(0);
    for (x = 0; x < w; x++) {
        line_ant[x] = tmp = pixel_ant = lowpass(pixel_ant, LOAD(x), spatial, depth);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
        STORE(x, tmp);
    }

    for (y = 1; y < h; y++) {
        src += sstride;
        dst += dstride;
        frame_ant += w;
        if (s->denoise_row[depth]) {
            /* Optimized kernel handles the whole row, including updates to
             * line_ant and frame_ant. */
            s->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
            continue;
        }
        pixel_ant = LOAD(0);
        for (x = 0; x < w-1; x++) {
            /* vertical blend against the previous (already smoothed) row */
            line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
            /* horizontal blend: carry the running value one pixel right */
            pixel_ant = lowpass(pixel_ant, LOAD(x+1), spatial, depth);
            /* temporal blend against the stored previous frame */
            frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
            STORE(x, tmp);
        }
        /* Last column has no right neighbor; x == w-1 here (or 0 when w == 1,
         * in which case the loop above did not run). */
        line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
        STORE(x, tmp);
    }
}
/**
 * Denoise one plane at a fixed bit depth (inlined per depth via the
 * denoise() dispatch macro).
 *
 * Lazily allocates the temporal history buffer (*frame_ant_ptr) on the first
 * frame, seeding it with the unfiltered input, then runs either the combined
 * spatial+temporal pass or the cheaper temporal-only pass.
 *
 * @return 0 on success, AVERROR(ENOMEM) if the history buffer allocation fails
 */
av_always_inline
static int denoise_depth(HQDN3DContext *s,
                         uint8_t *src, uint8_t *dst,
                         uint16_t *line_ant, uint16_t **frame_ant_ptr,
                         int w, int h, int sstride, int dstride,
                         int16_t *spatial, int16_t *temporal, int depth)
{
    // FIXME: For 16-bit depth, frame_ant could be a pointer to the previous
    // filtered frame rather than a separate buffer.
    long x, y;
    uint16_t *frame_ant = *frame_ant_ptr;
    if (!frame_ant) {
        uint8_t *frame_src = src;
        *frame_ant_ptr = frame_ant = av_malloc_array(w, h*sizeof(uint16_t));
        if (!frame_ant)
            return AVERROR(ENOMEM);
        /* Seed the history with the current input, widened by LOAD(). */
        for (y = 0; y < h; y++, src += sstride, frame_ant += w)
            for (x = 0; x < w; x++)
                frame_ant[x] = LOAD(x);
        /* Rewind the pointers that the seeding loops advanced. */
        src = frame_src;
        frame_ant = *frame_ant_ptr;
    }
    /* precalc_coefs() sets slot 0 to !!strength, so spatial[0] tells us
     * whether the spatial pass is enabled at all. */
    if (spatial[0])
        denoise_spatial(s, src, dst, line_ant, frame_ant,
                        w, h, sstride, dstride, spatial, temporal, depth);
    else
        denoise_temporal(src, dst, frame_ant,
                         w, h, sstride, dstride, temporal, depth);
    emms_c();
    return 0;
}
/* Dispatch to denoise_depth() specialized for s->depth.  NOTE: this macro
 * expands inside do_denoise() and relies on its locals `s`, `out`, `in` and
 * `direct`; on failure it frees the frames and returns from the enclosing
 * function.  An unlisted depth leaves ret == AVERROR_BUG and also errors out. */
#define denoise(...)                                                    \
    do {                                                                \
        int ret = AVERROR_BUG;                                          \
        switch (s->depth) {                                             \
            case  8: ret = denoise_depth(__VA_ARGS__,  8); break;       \
            case  9: ret = denoise_depth(__VA_ARGS__,  9); break;       \
            case 10: ret = denoise_depth(__VA_ARGS__, 10); break;       \
            case 12: ret = denoise_depth(__VA_ARGS__, 12); break;       \
            case 14: ret = denoise_depth(__VA_ARGS__, 14); break;       \
            case 16: ret = denoise_depth(__VA_ARGS__, 16); break;       \
        }                                                               \
        if (ret < 0) {                                                  \
            av_frame_free(&out);                                        \
            if (!direct)                                                \
                av_frame_free(&in);                                     \
            return ret;                                                 \
        }                                                               \
    } while (0)
  158. static void precalc_coefs(double dist25, int depth, int16_t *ct)
  159. {
  160. int i;
  161. double gamma, simil, C;
  162. gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);
  163. for (i = -256<<LUT_BITS; i < 256<<LUT_BITS; i++) {
  164. double f = ((i<<(9-LUT_BITS)) + (1<<(8-LUT_BITS)) - 1) / 512.0; // midpoint of the bin
  165. simil = FFMAX(0, 1.0 - fabs(f) / 255.0);
  166. C = pow(simil, gamma) * 256.0 * f;
  167. ct[(256<<LUT_BITS)+i] = lrint(C);
  168. }
  169. ct[0] = !!dist25;
  170. }
  171. #define PARAM1_DEFAULT 4.0
  172. #define PARAM2_DEFAULT 3.0
  173. #define PARAM3_DEFAULT 6.0
  174. static av_cold int init(AVFilterContext *ctx)
  175. {
  176. HQDN3DContext *s = ctx->priv;
  177. if (!s->strength[LUMA_SPATIAL])
  178. s->strength[LUMA_SPATIAL] = PARAM1_DEFAULT;
  179. if (!s->strength[CHROMA_SPATIAL])
  180. s->strength[CHROMA_SPATIAL] = PARAM2_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
  181. if (!s->strength[LUMA_TMP])
  182. s->strength[LUMA_TMP] = PARAM3_DEFAULT * s->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
  183. if (!s->strength[CHROMA_TMP])
  184. s->strength[CHROMA_TMP] = s->strength[LUMA_TMP] * s->strength[CHROMA_SPATIAL] / s->strength[LUMA_SPATIAL];
  185. av_log(ctx, AV_LOG_VERBOSE, "ls:%f cs:%f lt:%f ct:%f\n",
  186. s->strength[LUMA_SPATIAL], s->strength[CHROMA_SPATIAL],
  187. s->strength[LUMA_TMP], s->strength[CHROMA_TMP]);
  188. return 0;
  189. }
  190. static av_cold void uninit(AVFilterContext *ctx)
  191. {
  192. HQDN3DContext *s = ctx->priv;
  193. av_freep(&s->coefs[0]);
  194. av_freep(&s->coefs[1]);
  195. av_freep(&s->coefs[2]);
  196. av_freep(&s->coefs[3]);
  197. av_freep(&s->line[0]);
  198. av_freep(&s->line[1]);
  199. av_freep(&s->line[2]);
  200. av_freep(&s->frame_prev[0]);
  201. av_freep(&s->frame_prev[1]);
  202. av_freep(&s->frame_prev[2]);
  203. }
/**
 * Advertise the supported input formats: planar YUV at the bit depths the
 * denoise() dispatch switch handles (8, 9, 10, 12, 14, 16).
 */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
  224. static void calc_coefs(AVFilterContext *ctx)
  225. {
  226. HQDN3DContext *s = ctx->priv;
  227. for (int i = 0; i < 4; i++)
  228. precalc_coefs(s->strength[i], s->depth, s->coefs[i]);
  229. }
  230. static int config_input(AVFilterLink *inlink)
  231. {
  232. AVFilterContext *ctx = inlink->dst;
  233. HQDN3DContext *s = inlink->dst->priv;
  234. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  235. int i, depth;
  236. uninit(inlink->dst);
  237. s->hsub = desc->log2_chroma_w;
  238. s->vsub = desc->log2_chroma_h;
  239. s->depth = depth = desc->comp[0].depth;
  240. for (i = 0; i < 3; i++) {
  241. s->line[i] = av_malloc_array(inlink->w, sizeof(*s->line[i]));
  242. if (!s->line[i])
  243. return AVERROR(ENOMEM);
  244. }
  245. for (i = 0; i < 4; i++) {
  246. s->coefs[i] = av_malloc((512<<LUT_BITS) * sizeof(int16_t));
  247. if (!s->coefs[i])
  248. return AVERROR(ENOMEM);
  249. }
  250. calc_coefs(ctx);
  251. if (ARCH_X86)
  252. ff_hqdn3d_init_x86(s);
  253. return 0;
  254. }
/* Per-frame job context shared by the plane-worker threads. */
typedef struct ThreadData {
    AVFrame *in, *out;  // source and destination frames (equal when in-place)
    int direct;         // nonzero when filtering in place (out == in)
} ThreadData;

/**
 * Slice-thread worker: denoise one plane (job_nr 0 = luma, 1/2 = chroma).
 * NOTE: the denoise() macro expands here and relies on the locals being
 * named exactly `s`, `out`, `in` and `direct`; it may also return an error
 * from this function directly.
 */
static int do_denoise(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
{
    HQDN3DContext *s = ctx->priv;
    const ThreadData *td = data;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    int direct = td->direct;

    /* Chroma planes (job_nr != 0) use subsampled dimensions and the chroma
     * LUTs; rounding up keeps odd sizes covered. */
    denoise(s, in->data[job_nr], out->data[job_nr],
            s->line[job_nr], &s->frame_prev[job_nr],
            AV_CEIL_RSHIFT(in->width, (!!job_nr * s->hsub)),
            AV_CEIL_RSHIFT(in->height, (!!job_nr * s->vsub)),
            in->linesize[job_nr], out->linesize[job_nr],
            s->coefs[job_nr ? CHROMA_SPATIAL : LUMA_SPATIAL],
            s->coefs[job_nr ? CHROMA_TMP : LUMA_TMP]);
    return 0;
}
/**
 * Per-frame entry point: denoise all three planes (one slice-thread job per
 * plane) and forward the result downstream.
 *
 * Ownership: takes `in`; exactly one of `in`/`out` is passed on, the other
 * (if distinct) is freed here or via the denoise() error path.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    /* Filter in place only when the input is writable and the filter is
     * enabled; when disabled the input must be forwarded untouched. */
    int direct = av_frame_is_writable(in) && !ctx->is_disabled;
    ThreadData td;
    if (direct) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    td.in = in;
    td.out = out;
    td.direct = direct;
    /* one thread per plane */
    ctx->internal->execute(ctx, do_denoise, &td, NULL, 3);
    /* When disabled the filter still ran (into the scratch `out`, updating
     * the temporal history) but the untouched input is what gets emitted. */
    if (ctx->is_disabled) {
        av_frame_free(&out);
        return ff_filter_frame(outlink, in);
    }
    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
  305. static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
  306. char *res, int res_len, int flags)
  307. {
  308. int ret;
  309. ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
  310. if (ret < 0)
  311. return ret;
  312. calc_coefs(ctx);
  313. return 0;
  314. }
#define OFFSET(x) offsetof(HQDN3DContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM
/* Default 0.0 means "unset": the real defaults are derived from the other
 * strengths in init(). */
static const AVOption hqdn3d_options[] = {
    { "luma_spatial",   "spatial luma strength",    OFFSET(strength[LUMA_SPATIAL]),   AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "chroma_spatial", "spatial chroma strength",  OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "luma_tmp",       "temporal luma strength",   OFFSET(strength[LUMA_TMP]),       AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "chroma_tmp",     "temporal chroma strength", OFFSET(strength[CHROMA_TMP]),     AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(hqdn3d);
static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,   // (re)allocate buffers / LUTs per format
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO
    },
    { NULL }
};
/* Filter definition.  Slice-threaded (one job per plane); timeline support is
 * "internal" because filter_frame handles ctx->is_disabled itself to keep the
 * temporal history updated even while bypassed. */
AVFilter ff_vf_hqdn3d = {
    .name            = "hqdn3d",
    .description     = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
    .priv_size       = sizeof(HQDN3DContext),
    .priv_class      = &hqdn3d_class,
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = avfilter_vf_hqdn3d_inputs,
    .outputs         = avfilter_vf_hqdn3d_outputs,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};