You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

432 lines
14KB

  1. /*
  2. * Copyright (c) 2013 Clément Bœsch
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * A simple, relatively efficient and extremely slow DCT image denoiser.
  22. * @see http://www.ipol.im/pub/art/2011/ys-dct/
  23. */
  24. #include "libavcodec/avfft.h"
  25. #include "libavutil/eval.h"
  26. #include "libavutil/opt.h"
  27. #include "drawutils.h"
  28. #include "internal.h"
  29. #define NBITS 4
  30. #define BSIZE (1<<(NBITS))
/* Names of the variables usable in the "expr"/"e" option; the order must
 * match the VAR_* enum below ("c" is the current DCT coefficient magnitude). */
static const char *const var_names[] = { "c", NULL };
enum { VAR_C, VAR_VARS_NB };
typedef struct {
    const AVClass *class;

    /* coefficient factor expression */
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];

    int pr_width, pr_height;   // width and height of the area to process
    float sigma;               // used when no expression is set
    float th;                  // threshold (3*sigma)
    float color_dct[3][3];     // 3x3 DCT for color decorrelation
    float *cbuf[2][3];         // two planar rgb color buffers
    float *weights;            // dct coeff are cumulated with overlapping; these values are used for averaging
    int p_linesize;            // line sizes for color and weights
    int overlap;               // number of block overlapping pixels
    int step;                  // block step increment (BSIZE - overlap)
    DCTContext *dct, *idct;    // DCT and inverse DCT contexts
    float *block, *tmp_block;  // two BSIZE x BSIZE block buffers
} DCTdnoizContext;
#define OFFSET(x) offsetof(DCTdnoizContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* User options; when "expr"/"e" is set it overrides the sigma thresholding
 * (see init() and filter_plane()). */
static const AVOption dctdnoiz_options[] = {
    { "sigma",   "set noise sigma constant",               OFFSET(sigma),    AV_OPT_TYPE_FLOAT,  {.dbl=0},            0, 999,          .flags = FLAGS },
    { "s",       "set noise sigma constant",               OFFSET(sigma),    AV_OPT_TYPE_FLOAT,  {.dbl=0},            0, 999,          .flags = FLAGS },
    { "overlap", "set number of block overlapping pixels", OFFSET(overlap),  AV_OPT_TYPE_INT,    {.i64=(1<<NBITS)-1}, 0, (1<<NBITS)-1, .flags = FLAGS },
    { "expr",    "set coefficient factor expression",      OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL},                          .flags = FLAGS },
    { "e",       "set coefficient factor expression",      OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL},                          .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(dctdnoiz);
  62. static float *dct_block(DCTdnoizContext *ctx, const float *src, int src_linesize)
  63. {
  64. int x, y;
  65. float *column;
  66. for (y = 0; y < BSIZE; y++) {
  67. float *line = ctx->block;
  68. memcpy(line, src, BSIZE * sizeof(*line));
  69. src += src_linesize;
  70. av_dct_calc(ctx->dct, line);
  71. column = ctx->tmp_block + y;
  72. for (x = 0; x < BSIZE; x++) {
  73. *line *= x == 0 ? 1. / sqrt(BSIZE) : sqrt(2. / BSIZE);
  74. *column = *line++;
  75. column += BSIZE;
  76. }
  77. }
  78. column = ctx->tmp_block;
  79. for (x = 0; x < BSIZE; x++) {
  80. av_dct_calc(ctx->dct, column);
  81. for (y = 0; y < BSIZE; y++)
  82. column[y] *= y == 0 ? 1. / sqrt(BSIZE) : sqrt(2. / BSIZE);
  83. column += BSIZE;
  84. }
  85. for (y = 0; y < BSIZE; y++)
  86. for (x = 0; x < BSIZE; x++)
  87. ctx->block[y*BSIZE + x] = ctx->tmp_block[x*BSIZE + y];
  88. return ctx->block;
  89. }
  90. static void idct_block(DCTdnoizContext *ctx, float *dst, int dst_linesize)
  91. {
  92. int x, y;
  93. float *block = ctx->block;
  94. float *tmp = ctx->tmp_block;
  95. for (y = 0; y < BSIZE; y++) {
  96. for (x = 0; x < BSIZE; x++)
  97. block[x] *= x == 0 ? sqrt(BSIZE) : 1./sqrt(2. / BSIZE);
  98. av_dct_calc(ctx->idct, block);
  99. block += BSIZE;
  100. }
  101. block = ctx->block;
  102. for (y = 0; y < BSIZE; y++) {
  103. for (x = 0; x < BSIZE; x++) {
  104. tmp[x] = block[x*BSIZE + y];
  105. tmp[x] *= x == 0 ? sqrt(BSIZE) : 1./sqrt(2. / BSIZE);
  106. }
  107. av_dct_calc(ctx->idct, tmp);
  108. for (x = 0; x < BSIZE; x++)
  109. dst[x*dst_linesize + y] += tmp[x];
  110. }
  111. }
/* Per-link configuration: build the color decorrelation matrix, compute the
 * processable area, and allocate the color buffers and averaging weights. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DCTdnoizContext *s = ctx->priv;
    int i, x, y, bx, by, linesize, *iweights;
    /* orthonormal 3x3 DCT basis used to decorrelate the three color channels */
    const float dct_3x3[3][3] = {
        { 1./sqrt(3),  1./sqrt(3),  1./sqrt(3) },
        { 1./sqrt(2),           0, -1./sqrt(2) },
        { 1./sqrt(6), -2./sqrt(6),  1./sqrt(6) },
    };
    uint8_t rgba_map[4];

    /* reorder the basis rows/columns to match the actual packed component
     * order of the pixel format (RGB24 vs BGR24) */
    ff_fill_rgba_map(rgba_map, inlink->format);
    for (y = 0; y < 3; y++)
        for (x = 0; x < 3; x++)
            s->color_dct[y][x] = dct_3x3[rgba_map[y]][rgba_map[x]];

    /* only an area that is exactly tiled by BSIZE blocks advancing by "step"
     * is processed; the remaining right/bottom pixels are left untouched */
    s->pr_width  = inlink->w - (inlink->w - BSIZE) % s->step;
    s->pr_height = inlink->h - (inlink->h - BSIZE) % s->step;
    if (s->pr_width != inlink->w)
        av_log(ctx, AV_LOG_WARNING, "The last %d horizontal pixels won't be denoised\n",
               inlink->w - s->pr_width);
    if (s->pr_height != inlink->h)
        av_log(ctx, AV_LOG_WARNING, "The last %d vertical pixels won't be denoised\n",
               inlink->h - s->pr_height);

    /* NOTE(review): the buffers below are allocated without freeing any
     * previous allocation — if config_input() can run more than once on the
     * same instance this leaks; confirm against the framework's call rules. */
    s->p_linesize = linesize = FFALIGN(s->pr_width, 32);
    for (i = 0; i < 2; i++) {
        s->cbuf[i][0] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][0]));
        s->cbuf[i][1] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][1]));
        s->cbuf[i][2] = av_malloc(linesize * s->pr_height * sizeof(*s->cbuf[i][2]));
        if (!s->cbuf[i][0] || !s->cbuf[i][1] || !s->cbuf[i][2])
            return AVERROR(ENOMEM);
    }

    /* weights[p] = 1 / (number of blocks overlapping pixel p); used by
     * filter_plane() to average the accumulated block contributions */
    s->weights = av_malloc(s->pr_height * linesize * sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);
    iweights = av_calloc(s->pr_height, linesize * sizeof(*iweights));
    if (!iweights)
        return AVERROR(ENOMEM);
    /* count how many blocks cover each pixel... */
    for (y = 0; y < s->pr_height - BSIZE + 1; y += s->step)
        for (x = 0; x < s->pr_width - BSIZE + 1; x += s->step)
            for (by = 0; by < BSIZE; by++)
                for (bx = 0; bx < BSIZE; bx++)
                    iweights[(y + by)*linesize + x + bx]++;
    /* ...then invert the counts into the final averaging weights */
    for (y = 0; y < s->pr_height; y++)
        for (x = 0; x < s->pr_width; x++)
            s->weights[y*linesize + x] = 1. / iweights[y*linesize + x];
    av_free(iweights);

    return 0;
}
  160. static av_cold int init(AVFilterContext *ctx)
  161. {
  162. DCTdnoizContext *s = ctx->priv;
  163. if (s->expr_str) {
  164. int ret = av_expr_parse(&s->expr, s->expr_str, var_names,
  165. NULL, NULL, NULL, NULL, 0, ctx);
  166. if (ret < 0)
  167. return ret;
  168. }
  169. s->th = s->sigma * 3.;
  170. s->step = BSIZE - s->overlap;
  171. s->dct = av_dct_init(NBITS, DCT_II);
  172. s->idct = av_dct_init(NBITS, DCT_III);
  173. s->block = av_malloc(BSIZE * BSIZE * sizeof(*s->block));
  174. s->tmp_block = av_malloc(BSIZE * BSIZE * sizeof(*s->tmp_block));
  175. if (!s->dct || !s->idct || !s->tmp_block || !s->block)
  176. return AVERROR(ENOMEM);
  177. return 0;
  178. }
  179. static int query_formats(AVFilterContext *ctx)
  180. {
  181. static const enum AVPixelFormat pix_fmts[] = {
  182. AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,
  183. AV_PIX_FMT_NONE
  184. };
  185. ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
  186. return 0;
  187. }
  188. static void color_decorrelation(float dct3ch[3][3], float **dst, int dst_linesize,
  189. const uint8_t *src, int src_linesize, int w, int h)
  190. {
  191. int x, y;
  192. float *dstp_r = dst[0];
  193. float *dstp_g = dst[1];
  194. float *dstp_b = dst[2];
  195. for (y = 0; y < h; y++) {
  196. const uint8_t *srcp = src;
  197. for (x = 0; x < w; x++) {
  198. dstp_r[x] = srcp[0] * dct3ch[0][0] + srcp[1] * dct3ch[0][1] + srcp[2] * dct3ch[0][2];
  199. dstp_g[x] = srcp[0] * dct3ch[1][0] + srcp[1] * dct3ch[1][1] + srcp[2] * dct3ch[1][2];
  200. dstp_b[x] = srcp[0] * dct3ch[2][0] + srcp[1] * dct3ch[2][1] + srcp[2] * dct3ch[2][2];
  201. srcp += 3;
  202. }
  203. src += src_linesize;
  204. dstp_r += dst_linesize;
  205. dstp_g += dst_linesize;
  206. dstp_b += dst_linesize;
  207. }
  208. }
  209. static void color_correlation(float dct3ch[3][3], uint8_t *dst, int dst_linesize,
  210. float **src, int src_linesize, int w, int h)
  211. {
  212. int x, y;
  213. const float *src_r = src[0];
  214. const float *src_g = src[1];
  215. const float *src_b = src[2];
  216. for (y = 0; y < h; y++) {
  217. uint8_t *dstp = dst;
  218. for (x = 0; x < w; x++) {
  219. dstp[0] = av_clip_uint8(src_r[x] * dct3ch[0][0] + src_g[x] * dct3ch[1][0] + src_b[x] * dct3ch[2][0]);
  220. dstp[1] = av_clip_uint8(src_r[x] * dct3ch[0][1] + src_g[x] * dct3ch[1][1] + src_b[x] * dct3ch[2][1]);
  221. dstp[2] = av_clip_uint8(src_r[x] * dct3ch[0][2] + src_g[x] * dct3ch[1][2] + src_b[x] * dct3ch[2][2]);
  222. dstp += 3;
  223. }
  224. dst += dst_linesize;
  225. src_r += src_linesize;
  226. src_g += src_linesize;
  227. src_b += src_linesize;
  228. }
  229. }
/**
 * Denoise one decorrelated float plane: for every (overlapping) BSIZE x BSIZE
 * block, take its 2D DCT, attenuate the coefficients — either through the
 * user expression or by zeroing those below the 3*sigma threshold — and
 * accumulate the inverse DCT into dst; finally scale every pixel by the
 * precomputed overlap weights to average the block contributions.
 */
static void filter_plane(AVFilterContext *ctx,
                         float *dst, int dst_linesize,
                         const float *src, int src_linesize,
                         int w, int h)
{
    int x, y, bx, by;
    DCTdnoizContext *s = ctx->priv;
    float *dst0 = dst;
    const float *weights = s->weights;

    // reset block sums
    memset(dst, 0, h * dst_linesize * sizeof(*dst));

    // block dct sums
    for (y = 0; y < h - BSIZE + 1; y += s->step) {
        for (x = 0; x < w - BSIZE + 1; x += s->step) {
            float *ftb = dct_block(s, src + x, src_linesize);

            if (s->expr) {
                /* scale each coefficient by the expression evaluated with
                 * "c" bound to the coefficient magnitude */
                for (by = 0; by < BSIZE; by++) {
                    for (bx = 0; bx < BSIZE; bx++) {
                        s->var_values[VAR_C] = FFABS(*ftb);
                        *ftb++ *= av_expr_eval(s->expr, s->var_values, s);
                    }
                }
            } else {
                /* hard thresholding: drop coefficients below 3*sigma */
                for (by = 0; by < BSIZE; by++) {
                    for (bx = 0; bx < BSIZE; bx++) {
                        if (FFABS(*ftb) < s->th)
                            *ftb = 0;
                        ftb++;
                    }
                }
            }
            idct_block(s, dst + x, dst_linesize);
        }
        src += s->step * src_linesize;
        dst += s->step * dst_linesize;
    }

    // average blocks
    dst = dst0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++)
            dst[x] *= weights[x];
        dst += dst_linesize;
        weights += dst_linesize;
    }
}
/**
 * Process one frame: decorrelate the packed RGB data into planar float
 * channels, denoise each channel, and recombine into the output frame.
 * Takes ownership of "in".
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DCTdnoizContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int direct, plane;
    AVFrame *out;

    /* filter in place when the input frame is writable, otherwise work on a
     * newly allocated output buffer */
    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* packed RGB -> three planar float channels, decorrelated by color_dct */
    color_decorrelation(s->color_dct, s->cbuf[0], s->p_linesize,
                        in->data[0], in->linesize[0], s->pr_width, s->pr_height);
    /* denoise each decorrelated channel independently */
    for (plane = 0; plane < 3; plane++)
        filter_plane(ctx, s->cbuf[1][plane], s->p_linesize,
                     s->cbuf[0][plane], s->p_linesize,
                     s->pr_width, s->pr_height);
    /* recombine the filtered channels into the packed RGB output */
    color_correlation(s->color_dct, out->data[0], out->linesize[0],
                      s->cbuf[1], s->p_linesize, s->pr_width, s->pr_height);

    if (!direct) {
        /* only a pr_width x pr_height area was denoised: copy the untouched
         * right and bottom borders from the source frame (when filtering in
         * place those pixels are already present) */
        int y;
        uint8_t *dst = out->data[0];
        const uint8_t *src = in->data[0];
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in->linesize[0];
        const int hpad = (inlink->w - s->pr_width) * 3; // in bytes (3 bytes/pixel)
        const int vpad = (inlink->h - s->pr_height);    // in lines

        if (hpad) {
            uint8_t *dstp = dst + s->pr_width * 3;
            const uint8_t *srcp = src + s->pr_width * 3;

            for (y = 0; y < s->pr_height; y++) {
                memcpy(dstp, srcp, hpad);
                dstp += dst_linesize;
                srcp += src_linesize;
            }
        }
        if (vpad) {
            uint8_t *dstp = dst + s->pr_height * dst_linesize;
            const uint8_t *srcp = src + s->pr_height * src_linesize;

            for (y = 0; y < vpad; y++) {
                memcpy(dstp, srcp, inlink->w * 3);
                dstp += dst_linesize;
                srcp += src_linesize;
            }
        }
        av_frame_free(&in);
    }
    return ff_filter_frame(outlink, out);
}
  332. static av_cold void uninit(AVFilterContext *ctx)
  333. {
  334. int i;
  335. DCTdnoizContext *s = ctx->priv;
  336. av_dct_end(s->dct);
  337. av_dct_end(s->idct);
  338. av_free(s->block);
  339. av_free(s->tmp_block);
  340. av_free(s->weights);
  341. for (i = 0; i < 2; i++) {
  342. av_free(s->cbuf[i][0]);
  343. av_free(s->cbuf[i][1]);
  344. av_free(s->cbuf[i][2]);
  345. }
  346. av_expr_free(s->expr);
  347. }
/* Single video input; config_input() runs once the link is configured. */
static const AVFilterPad dctdnoiz_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single video output with default properties. */
static const AVFilterPad dctdnoiz_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/* Filter registration; generic timeline support enables the "enable" option. */
AVFilter avfilter_vf_dctdnoiz = {
    .name          = "dctdnoiz",
    .description   = NULL_IF_CONFIG_SMALL("Denoise frames using 2D DCT."),
    .priv_size     = sizeof(DCTdnoizContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = dctdnoiz_inputs,
    .outputs       = dctdnoiz_outputs,
    .priv_class    = &dctdnoiz_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};