/*
 * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
 * Copyright (c) 2010 Baptiste Coudurier
 * Copyright (c) 2012 Loren Merritt
 *
 * This file is part of FFmpeg, ported from MPlayer.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * high quality 3d video denoiser, ported from MPlayer
 * libmpcodecs/vf_hqdn3d.c.
 */

#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct {
    int16_t coefs[4][512*16];
    uint16_t *line;
    uint16_t *frame_prev[3];
    int hsub, vsub;
    int depth;
} HQDN3DContext;

#define RIGHTSHIFT(a,b) (((a)+(((1<<(b))-1)>>1))>>(b))
#define LOAD(x) ((depth==8 ? src[x] : AV_RN16A(src+(x)*2)) << (16-depth))
#define STORE(x,val) (depth==8 ? dst[x] = RIGHTSHIFT(val, 16-depth)\
                               : AV_WN16A(dst+(x)*2, RIGHTSHIFT(val, 16-depth)))

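/* One step of the recursive low-pass filter: coef[] maps the (truncated)
 * difference between the previous and current 16-bit samples to a correction,
 * so the result is roughly cur + weight(diff) * (prev - cur). Callers bias the
 * table pointer by 0x1000 so that negative differences index below it. */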
static inline uint32_t lowpass(int prev, int cur, int16_t *coef)
{
    int d = (prev-cur)>>4;
    return cur + coef[d];
}

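/* Temporal-only denoising: each output pixel is the low-pass blend of the
 * source pixel with the corresponding pixel of the previously filtered frame,
 * and the stored previous frame is updated in place. */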
av_always_inline
static void denoise_temporal(uint8_t *src, uint8_t *dst,
                             uint16_t *frame_ant,
                             int w, int h, int sstride, int dstride,
                             int16_t *temporal, int depth)
{
    long x, y;
    uint32_t tmp;

    temporal += 0x1000;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            frame_ant[x] = tmp = lowpass(frame_ant[x], LOAD(x), temporal);
            STORE(x, tmp);
        }
        src += sstride;
        dst += dstride;
        frame_ant += w;
    }
}

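/* Combined spatial + temporal denoising: each pixel is first low-passed
 * against its left and top neighbours (line_ant holds the previous line),
 * then against the previously filtered frame. */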
av_always_inline
static void denoise_spatial(uint8_t *src, uint8_t *dst,
                            uint16_t *line_ant, uint16_t *frame_ant,
                            int w, int h, int sstride, int dstride,
                            int16_t *spatial, int16_t *temporal, int depth)
{
    long x, y;
    uint32_t pixel_ant;
    uint32_t tmp;

    spatial  += 0x1000;
    temporal += 0x1000;

    /* First line has no top neighbor. Only left one for each tmp and
     * last frame */
    pixel_ant = LOAD(0);
    for (x = 0; x < w; x++) {
        line_ant[x] = tmp = pixel_ant = lowpass(pixel_ant, LOAD(x), spatial);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal);
        STORE(x, tmp);
    }

    for (y = 1; y < h; y++) {
        src += sstride;
        dst += dstride;
        frame_ant += w;
        pixel_ant = LOAD(0);
        for (x = 0; x < w-1; x++) {
            line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial);
            pixel_ant = lowpass(pixel_ant, LOAD(x+1), spatial);
            frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal);
            STORE(x, tmp);
        }
        line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal);
        STORE(x, tmp);
    }
}

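/* Depth-templated entry point: on the first call it allocates the
 * previous-frame buffer and seeds it with the current plane, then dispatches
 * to the spatial+temporal or temporal-only path depending on whether the
 * spatial strength is zero. */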
av_always_inline
static void denoise_depth(uint8_t *src, uint8_t *dst,
                          uint16_t *line_ant, uint16_t **frame_ant_ptr,
                          int w, int h, int sstride, int dstride,
                          int16_t *spatial, int16_t *temporal, int depth)
{
    long x, y;
    uint16_t *frame_ant = *frame_ant_ptr;

    if (!frame_ant) {
        uint8_t *frame_src = src;
        *frame_ant_ptr = frame_ant = av_malloc(w*h*sizeof(uint16_t));
        for (y = 0; y < h; y++, src += sstride, frame_ant += w)
            for (x = 0; x < w; x++)
                frame_ant[x] = LOAD(x);
        src = frame_src;
        frame_ant = *frame_ant_ptr;
    }

    if (spatial[0])
        denoise_spatial(src, dst, line_ant, frame_ant,
                        w, h, sstride, dstride, spatial, temporal, depth);
    else
        denoise_temporal(src, dst, frame_ant,
                         w, h, sstride, dstride, temporal, depth);
}

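/* Expands denoise_depth() once per supported bit depth so that LOAD/STORE
 * compile down to constant shifts in each instantiation. */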
#define denoise(...) \
    switch (hqdn3d->depth) {\
        case  8: denoise_depth(__VA_ARGS__,  8); break;\
        case  9: denoise_depth(__VA_ARGS__,  9); break;\
        case 10: denoise_depth(__VA_ARGS__, 10); break;\
    }

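/* Builds the lookup table used by lowpass(): entry 16*256+i holds the
 * correction for a pixel difference of i/16 (in 8-bit units), attenuated so
 * that the filter weight falls to about 25% when the difference reaches the
 * strength dist25. ct[0] also records whether the strength is nonzero, which
 * denoise_depth() checks on the spatial table to pick the code path. */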
static void precalc_coefs(int16_t *ct, double dist25)
{
    int i;
    double gamma, simil, C;

    gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);

    for (i = -255*16; i <= 255*16; i++) {
        // lowpass() truncates (not rounds) the diff, so +15/32 for the midpoint of the bin.
        double f = (i + 15.0/32.0) / 16.0;
        simil = 1.0 - FFABS(f) / 255.0;
        C = pow(simil, gamma) * 256.0 * f;
        ct[16*256+i] = lrint(C);
    }

    ct[0] = !!dist25;
}

#define PARAM1_DEFAULT 4.0
#define PARAM2_DEFAULT 3.0
#define PARAM3_DEFAULT 6.0

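/* Parses the filter arguments "luma_spatial:chroma_spatial:luma_tmp:chroma_tmp"
 * (e.g. "hqdn3d=4:3:6:4.5"); omitted values are derived proportionally from
 * the ones given, or from the defaults above. */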
static int init(AVFilterContext *ctx, const char *args)
{
    HQDN3DContext *hqdn3d = ctx->priv;
    double lum_spac, lum_tmp, chrom_spac, chrom_tmp;
    double param1, param2, param3, param4;

    lum_spac   = PARAM1_DEFAULT;
    chrom_spac = PARAM2_DEFAULT;
    lum_tmp    = PARAM3_DEFAULT;
    chrom_tmp  = lum_tmp * chrom_spac / lum_spac;

    if (args) {
        switch (sscanf(args, "%lf:%lf:%lf:%lf",
                       &param1, &param2, &param3, &param4)) {
        case 1:
            lum_spac   = param1;
            chrom_spac = PARAM2_DEFAULT * param1 / PARAM1_DEFAULT;
            lum_tmp    = PARAM3_DEFAULT * param1 / PARAM1_DEFAULT;
            chrom_tmp  = lum_tmp * chrom_spac / lum_spac;
            break;
        case 2:
            lum_spac   = param1;
            chrom_spac = param2;
            lum_tmp    = PARAM3_DEFAULT * param1 / PARAM1_DEFAULT;
            chrom_tmp  = lum_tmp * chrom_spac / lum_spac;
            break;
        case 3:
            lum_spac   = param1;
            chrom_spac = param2;
            lum_tmp    = param3;
            chrom_tmp  = lum_tmp * chrom_spac / lum_spac;
            break;
        case 4:
            lum_spac   = param1;
            chrom_spac = param2;
            lum_tmp    = param3;
            chrom_tmp  = param4;
            break;
        }
    }

    av_log(ctx, AV_LOG_VERBOSE, "ls:%lf cs:%lf lt:%lf ct:%lf\n",
           lum_spac, chrom_spac, lum_tmp, chrom_tmp);

    if (lum_spac < 0 || chrom_spac < 0 || isnan(chrom_tmp)) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid negative value for luma or chroma spatial strength, "
               "or resulting value for chroma temporal strength is nan.\n");
        return AVERROR(EINVAL);
    }

    precalc_coefs(hqdn3d->coefs[0], lum_spac);
    precalc_coefs(hqdn3d->coefs[1], lum_tmp);
    precalc_coefs(hqdn3d->coefs[2], chrom_spac);
    precalc_coefs(hqdn3d->coefs[3], chrom_tmp);

    return 0;
}

static void uninit(AVFilterContext *ctx)
{
    HQDN3DContext *hqdn3d = ctx->priv;

    av_freep(&hqdn3d->line);
    av_freep(&hqdn3d->frame_prev[0]);
    av_freep(&hqdn3d->frame_prev[1]);
    av_freep(&hqdn3d->frame_prev[2]);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum PixelFormat pix_fmts[] = {
        PIX_FMT_YUV420P,
        PIX_FMT_YUV422P,
        PIX_FMT_YUV444P,
        PIX_FMT_YUV410P,
        PIX_FMT_YUV411P,
        PIX_FMT_YUV440P,
        PIX_FMT_YUVJ420P,
        PIX_FMT_YUVJ422P,
        PIX_FMT_YUVJ444P,
        PIX_FMT_YUVJ440P,
        AV_NE( PIX_FMT_YUV420P9BE, PIX_FMT_YUV420P9LE ),
        AV_NE( PIX_FMT_YUV422P9BE, PIX_FMT_YUV422P9LE ),
        AV_NE( PIX_FMT_YUV444P9BE, PIX_FMT_YUV444P9LE ),
        AV_NE( PIX_FMT_YUV420P10BE, PIX_FMT_YUV420P10LE ),
        AV_NE( PIX_FMT_YUV422P10BE, PIX_FMT_YUV422P10LE ),
        AV_NE( PIX_FMT_YUV444P10BE, PIX_FMT_YUV444P10LE ),
        PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;

    hqdn3d->hsub  = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
    hqdn3d->vsub  = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
    hqdn3d->depth = av_pix_fmt_descriptors[inlink->format].comp[0].depth_minus1+1;

    hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
    if (!hqdn3d->line)
        return AVERROR(ENOMEM);

    return 0;
}

static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
    return 0;
}

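/* Denoises all three planes: luma uses coefs[0]/[1], chroma planes use the
 * subsampled dimensions and the chroma tables coefs[2]/[3]. The output buffer
 * may alias the input, since the input pad uses ff_inplace_start_frame. */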
static int end_frame(AVFilterLink *inlink)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpic  = inlink ->cur_buf;
    AVFilterBufferRef *outpic = outlink->out_buf;
    int ret, c;

    for (c = 0; c < 3; c++) {
        denoise(inpic->data[c], outpic->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                inpic->video->w >> (!!c * hqdn3d->hsub),
                inpic->video->h >> (!!c * hqdn3d->vsub),
                inpic->linesize[c], outpic->linesize[c],
                hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
    }

    if ((ret = ff_draw_slice(outlink, 0, inpic->video->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}

AVFilter avfilter_vf_hqdn3d = {
    .name          = "hqdn3d",
    .description   = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),

    .priv_size     = sizeof(HQDN3DContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs    = (const AVFilterPad[]) {{ .name         = "default",
                                          .type         = AVMEDIA_TYPE_VIDEO,
                                          .start_frame  = ff_inplace_start_frame,
                                          .draw_slice   = null_draw_slice,
                                          .config_props = config_input,
                                          .end_frame    = end_frame },
                                        { .name = NULL }},

    .outputs   = (const AVFilterPad[]) {{ .name         = "default",
                                          .type         = AVMEDIA_TYPE_VIDEO },
                                        { .name = NULL }},
};