/*
 * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
 * Copyright (c) 2010 Baptiste Coudurier
 * Copyright (c) 2012 Loren Merritt
 *
 * This file is part of FFmpeg, ported from MPlayer.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * high quality 3d video denoiser, ported from MPlayer
 * libmpcodecs/vf_hqdn3d.c.
 */

#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

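/* Filter state: one coefficient table per pass (luma spatial, luma temporal,
 * chroma spatial, chroma temporal), a one-row scratch buffer for the spatial
 * pass, and the previous filtered frame kept per plane. */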
typedef struct {
    int16_t coefs[4][512*16];
    uint16_t *line;
    uint16_t *frame_prev[3];
    int hsub, vsub;
    int depth;
} HQDN3DContext;

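/* LOAD/STORE read and write a sample at 16-bit working precision regardless
 * of the native bit depth; RIGHTSHIFT rounds rather than truncates when
 * scaling back down on store. */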
#define RIGHTSHIFT(a,b) (((a)+(((1<<(b))-1)>>1))>>(b))
#define LOAD(x) ((depth==8 ? src[x] : AV_RN16A(src+(x)*2)) << (16-depth))
#define STORE(x,val) (depth==8 ? dst[x] = RIGHTSHIFT(val, 16-depth)\
                               : AV_WN16A(dst+(x)*2, RIGHTSHIFT(val, 16-depth)))

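/* One IIR lowpass step: add a correction looked up from the precomputed
 * table by the (truncated) difference between the previous and current
 * value. Callers pre-offset coef to the middle of the table so negative
 * differences index valid entries. */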
static inline uint32_t lowpass(int prev, int cur, int16_t *coef)
{
    int d = (prev-cur)>>4;
    return cur + coef[d];
}

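/* Temporal-only pass: every pixel is lowpassed against the same pixel of the
 * previously filtered frame (frame_ant), which is updated in place and so
 * carries the running history. */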
av_always_inline
static void denoise_temporal(uint8_t *src, uint8_t *dst,
                             uint16_t *frame_ant,
                             int w, int h, int sstride, int dstride,
                             int16_t *temporal, int depth)
{
    long x, y;
    uint32_t tmp;

    temporal += 0x1000;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            frame_ant[x] = tmp = lowpass(frame_ant[x], LOAD(x), temporal);
            STORE(x, tmp);
        }
        src += sstride;
        dst += dstride;
        frame_ant += w;
    }
}

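/* Spatial + temporal pass: each pixel is lowpassed against its left
 * neighbour (pixel_ant) and the filtered pixel above it (line_ant), and the
 * spatial result is then lowpassed against the previous frame (frame_ant). */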
av_always_inline
static void denoise_spatial(uint8_t *src, uint8_t *dst,
                            uint16_t *line_ant, uint16_t *frame_ant,
                            int w, int h, int sstride, int dstride,
                            int16_t *spatial, int16_t *temporal, int depth)
{
    long x, y;
    uint32_t pixel_ant;
    uint32_t tmp;

    spatial  += 0x1000;
    temporal += 0x1000;

    /* First line has no top neighbor. Only left one for each tmp and
     * last frame */
    pixel_ant = LOAD(0);
    for (x = 0; x < w; x++) {
        line_ant[x] = tmp = pixel_ant = lowpass(pixel_ant, LOAD(x), spatial);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal);
        STORE(x, tmp);
    }

    for (y = 1; y < h; y++) {
        src += sstride;
        dst += dstride;
        frame_ant += w;
        pixel_ant = LOAD(0);
        for (x = 0; x < w-1; x++) {
            line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial);
            pixel_ant = lowpass(pixel_ant, LOAD(x+1), spatial);
            frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal);
            STORE(x, tmp);
        }
        line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal);
        STORE(x, tmp);
    }
}

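/* Per-plane entry point: on the first frame the history buffer is allocated
 * lazily (the allocation is not checked here) and seeded with the unfiltered
 * input, then the plane is dispatched to the spatial+temporal or
 * temporal-only variant depending on whether spatial filtering is enabled
 * (spatial[0]). */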
av_always_inline
static void denoise_depth(uint8_t *src, uint8_t *dst,
                          uint16_t *line_ant, uint16_t **frame_ant_ptr,
                          int w, int h, int sstride, int dstride,
                          int16_t *spatial, int16_t *temporal, int depth)
{
    long x, y;
    uint16_t *frame_ant = *frame_ant_ptr;

    if (!frame_ant) {
        uint8_t *frame_src = src;
        *frame_ant_ptr = frame_ant = av_malloc(w*h*sizeof(uint16_t));
        for (y = 0; y < h; y++, src += sstride, frame_ant += w)
            for (x = 0; x < w; x++)
                frame_ant[x] = LOAD(x);
        src = frame_src;
        frame_ant = *frame_ant_ptr;
    }

    if (spatial[0])
        denoise_spatial(src, dst, line_ant, frame_ant,
                        w, h, sstride, dstride, spatial, temporal, depth);
    else
        denoise_temporal(src, dst, frame_ant,
                         w, h, sstride, dstride, temporal, depth);
}

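/* Instantiate the denoiser per supported bit depth so that LOAD/STORE and
 * the depth-dependent shifts are resolved at compile time. */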
#define denoise(...) \
    switch (hqdn3d->depth) {\
        case  8: denoise_depth(__VA_ARGS__,  8); break;\
        case  9: denoise_depth(__VA_ARGS__,  9); break;\
        case 10: denoise_depth(__VA_ARGS__, 10); break;\
    }

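/* Build the lookup table used by lowpass() from a single strength value.
 * dist25 is the difference at which the blending weight drops to 25%; each
 * entry holds the correction for one (diff>>4) bin, and ct[0] doubles as the
 * "pass enabled" flag tested in denoise_depth(). */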
static void precalc_coefs(int16_t *ct, double dist25)
{
    int i;
    double gamma, simil, C;

    gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);

    for (i = -255*16; i <= 255*16; i++) {
        // lowpass() truncates (not rounds) the diff, so +15/32 for the midpoint of the bin.
        double f = (i + 15.0/32.0) / 16.0;
        simil = 1.0 - FFABS(f) / 255.0;
        C = pow(simil, gamma) * 256.0 * f;
        ct[16*256+i] = lrint(C);
    }

    ct[0] = !!dist25;
}

#define PARAM1_DEFAULT 4.0
#define PARAM2_DEFAULT 3.0
#define PARAM3_DEFAULT 6.0

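/* Parse the option string "luma_spatial:chroma_spatial:luma_tmp:chroma_tmp"
 * (e.g. -vf hqdn3d=4:3:6:4.5); any omitted strength is derived from the ones
 * given, falling back to the defaults above. */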
static int init(AVFilterContext *ctx, const char *args)
{
    HQDN3DContext *hqdn3d = ctx->priv;
    double lum_spac, lum_tmp, chrom_spac, chrom_tmp;
    double param1, param2, param3, param4;

    lum_spac   = PARAM1_DEFAULT;
    chrom_spac = PARAM2_DEFAULT;
    lum_tmp    = PARAM3_DEFAULT;
    chrom_tmp  = lum_tmp * chrom_spac / lum_spac;

    if (args) {
        switch (sscanf(args, "%lf:%lf:%lf:%lf",
                       &param1, &param2, &param3, &param4)) {
        case 1:
            lum_spac   = param1;
            chrom_spac = PARAM2_DEFAULT * param1 / PARAM1_DEFAULT;
            lum_tmp    = PARAM3_DEFAULT * param1 / PARAM1_DEFAULT;
            chrom_tmp  = lum_tmp * chrom_spac / lum_spac;
            break;
        case 2:
            lum_spac   = param1;
            chrom_spac = param2;
            lum_tmp    = PARAM3_DEFAULT * param1 / PARAM1_DEFAULT;
            chrom_tmp  = lum_tmp * chrom_spac / lum_spac;
            break;
        case 3:
            lum_spac   = param1;
            chrom_spac = param2;
            lum_tmp    = param3;
            chrom_tmp  = lum_tmp * chrom_spac / lum_spac;
            break;
        case 4:
            lum_spac   = param1;
            chrom_spac = param2;
            lum_tmp    = param3;
            chrom_tmp  = param4;
            break;
        }
    }

    av_log(ctx, AV_LOG_VERBOSE, "ls:%lf cs:%lf lt:%lf ct:%lf\n",
           lum_spac, chrom_spac, lum_tmp, chrom_tmp);

    if (lum_spac < 0 || chrom_spac < 0 || isnan(chrom_tmp)) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid negative value for luma or chroma spatial strength, "
               "or resulting value for chroma temporal strength is nan.\n");
        return AVERROR(EINVAL);
    }

    precalc_coefs(hqdn3d->coefs[0], lum_spac);
    precalc_coefs(hqdn3d->coefs[1], lum_tmp);
    precalc_coefs(hqdn3d->coefs[2], chrom_spac);
    precalc_coefs(hqdn3d->coefs[3], chrom_tmp);

    return 0;
}

static void uninit(AVFilterContext *ctx)
{
    HQDN3DContext *hqdn3d = ctx->priv;

    av_freep(&hqdn3d->line);
    av_freep(&hqdn3d->frame_prev[0]);
    av_freep(&hqdn3d->frame_prev[1]);
    av_freep(&hqdn3d->frame_prev[2]);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum PixelFormat pix_fmts[] = {
        PIX_FMT_YUV420P,
        PIX_FMT_YUV422P,
        PIX_FMT_YUV444P,
        PIX_FMT_YUV410P,
        PIX_FMT_YUV411P,
        PIX_FMT_YUV440P,
        PIX_FMT_YUVJ420P,
        PIX_FMT_YUVJ422P,
        PIX_FMT_YUVJ444P,
        PIX_FMT_YUVJ440P,
        AV_NE( PIX_FMT_YUV420P9BE,  PIX_FMT_YUV420P9LE ),
        AV_NE( PIX_FMT_YUV422P9BE,  PIX_FMT_YUV422P9LE ),
        AV_NE( PIX_FMT_YUV444P9BE,  PIX_FMT_YUV444P9LE ),
        AV_NE( PIX_FMT_YUV420P10BE, PIX_FMT_YUV420P10LE ),
        AV_NE( PIX_FMT_YUV422P10BE, PIX_FMT_YUV422P10LE ),
        AV_NE( PIX_FMT_YUV444P10BE, PIX_FMT_YUV444P10LE ),
        PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

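/* Cache the chroma subsampling factors and bit depth of the negotiated input
 * format and allocate the one-row scratch buffer used by the spatial pass. */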
static int config_input(AVFilterLink *inlink)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;

    hqdn3d->hsub  = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
    hqdn3d->vsub  = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
    hqdn3d->depth = av_pix_fmt_descriptors[inlink->format].comp[0].depth_minus1+1;

    hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
    if (!hqdn3d->line)
        return AVERROR(ENOMEM);

    return 0;
}

static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
    return 0;
}

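/* Denoise the completed frame plane by plane: plane 0 uses the luma
 * coefficient tables, planes 1 and 2 the chroma tables, with the plane size
 * reduced by the chroma subsampling factors. */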
static int end_frame(AVFilterLink *inlink)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpic  = inlink ->cur_buf;
    AVFilterBufferRef *outpic = outlink->out_buf;
    int ret, c;

    for (c = 0; c < 3; c++) {
        denoise(inpic->data[c], outpic->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                inpic->video->w >> (!!c * hqdn3d->hsub),
                inpic->video->h >> (!!c * hqdn3d->vsub),
                inpic->linesize[c], outpic->linesize[c],
                hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
    }

    if ((ret = ff_draw_slice(outlink, 0, inpic->video->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}

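/* The filter works on whole frames: incoming slices are ignored
 * (null_draw_slice) and the picture is denoised once it is complete, in
 * end_frame(). */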
AVFilter avfilter_vf_hqdn3d = {
    .name          = "hqdn3d",
    .description   = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),

    .priv_size     = sizeof(HQDN3DContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs    = (const AVFilterPad[]) {{ .name         = "default",
                                          .type         = AVMEDIA_TYPE_VIDEO,
                                          .start_frame  = ff_inplace_start_frame,
                                          .draw_slice   = null_draw_slice,
                                          .config_props = config_input,
                                          .end_frame    = end_frame },
                                        { .name = NULL}},

    .outputs   = (const AVFilterPad[]) {{ .name         = "default",
                                          .type         = AVMEDIA_TYPE_VIDEO },
                                        { .name = NULL}},
};