  1. /*
  2. * Copyright (c) 2003 LeFunGus, lefungus@altern.org
  3. *
  4. * This file is part of FFmpeg
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License along
  17. * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
  18. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  19. */
  20. #include <float.h>
  21. #include "libavutil/imgutils.h"
  22. #include "libavutil/attributes.h"
  23. #include "libavutil/common.h"
  24. #include "libavutil/pixdesc.h"
  25. #include "libavutil/intreadwrite.h"
  26. #include "libavutil/opt.h"
  27. #include "avfilter.h"
  28. #include "formats.h"
  29. #include "internal.h"
  30. #include "video.h"
/**
 * Private context of the vaguedenoiser filter — a wavelet-based spatial
 * video denoiser (decompose, shrink coefficients, reconstruct).
 */
typedef struct VagueDenoiserContext {
    const AVClass *class;   // required first member for the AVOption system

    float threshold;        // filtering strength ("threshold" option; scaled per bit depth in config_input)
    float percent;          // percentage of full denoising ("percent" option)
    int method;             // 0 = hard, 1 = soft, 2 = garrote thresholding
    int type;               // 0 = universal (VisuShrink), 1 = bayes (BayesShrink)
    int nsteps;             // number of wavelet decomposition steps ("nsteps" option, clamped in config_input)
    int planes;             // bitmask of planes to filter ("planes" option)

    int depth;              // input component bit depth
    int bpc;                // bytes per component (1 or 2)
    int peak;               // maximum pixel value, (1 << depth) - 1
    int nb_planes;          // plane count of the input pixel format
    int planeheight[4];     // per-plane height (chroma subsampled)
    int planewidth[4];      // per-plane width (chroma subsampled)

    float *block;           // whole-plane float working buffer (stride == plane width)
    float *in;              // one padded input line for the 1-D transforms
    float *out;             // one padded output line for the 1-D transforms
    float *tmp;             // scratch line used by the inverse transform

    // Per-plane, per-level subband sizes, filled in config_input().
    int hlowsize[4][32];
    int hhighsize[4][32];
    int vlowsize[4][32];
    int vhighsize[4][32];

    // Selected shrinkage function (hard/soft/qian), set in init().
    void (*thresholding)(float *block, const int width, const int height,
                         const int stride, const float threshold,
                         const float percent);
} VagueDenoiserContext;
#define OFFSET(x) offsetof(VagueDenoiserContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

/* User-visible options; named constants below each INT option select its value. */
static const AVOption vaguedenoiser_options[] = {
    { "threshold", "set filtering strength",        OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl=2.},  0, DBL_MAX, FLAGS },
    { "method",    "set filtering method",          OFFSET(method),    AV_OPT_TYPE_INT,   {.i64=2 },  0, 2,       FLAGS, "method" },
    { "hard",      "hard thresholding",             0,                 AV_OPT_TYPE_CONST, {.i64=0},   0, 0,       FLAGS, "method" },
    { "soft",      "soft thresholding",             0,                 AV_OPT_TYPE_CONST, {.i64=1},   0, 0,       FLAGS, "method" },
    { "garrote",   "garrote thresholding",          0,                 AV_OPT_TYPE_CONST, {.i64=2},   0, 0,       FLAGS, "method" },
    { "nsteps",    "set number of steps",           OFFSET(nsteps),    AV_OPT_TYPE_INT,   {.i64=6 },  1, 32,      FLAGS },
    { "percent",   "set percent of full denoising", OFFSET(percent),   AV_OPT_TYPE_FLOAT, {.dbl=85},  0, 100,     FLAGS },
    { "planes",    "set planes to filter",          OFFSET(planes),    AV_OPT_TYPE_INT,   {.i64=15 }, 0, 15,      FLAGS },
    { "type",      "set threshold type",            OFFSET(type),      AV_OPT_TYPE_INT,   {.i64=0 },  0, 1,       FLAGS, "type" },
    { "universal", "universal (VisuShrink)",        0,                 AV_OPT_TYPE_CONST, {.i64=0},   0, 0,       FLAGS, "type" },
    { "bayes",     "bayes (BayesShrink)",           0,                 AV_OPT_TYPE_CONST, {.i64=1},   0, 0,       FLAGS, "type" },
    { NULL }
};
  73. AVFILTER_DEFINE_CLASS(vaguedenoiser);
/* Padding (in samples) kept on each side of a line for symmetric extension. */
#define NPAD 10

/* Analysis (forward) and synthesis (inverse) wavelet filter banks:
 * a symmetric 9-tap/7-tap pair. NOTE(review): the values look like a
 * biorthogonal 9/7-style pair carried over from the original filter —
 * they are used verbatim by transform_step()/invert_step(). */
static const float analysis_low[9] = {
    0.037828455506995f, -0.023849465019380f, -0.110624404418423f, 0.377402855612654f,
    0.852698679009403f, 0.377402855612654f, -0.110624404418423f, -0.023849465019380f, 0.037828455506995f
};

static const float analysis_high[7] = {
    -0.064538882628938f, 0.040689417609558f, 0.418092273222212f, -0.788485616405664f,
    0.418092273222212f, 0.040689417609558f, -0.064538882628938f
};

static const float synthesis_low[7] = {
    -0.064538882628938f, -0.040689417609558f, 0.418092273222212f, 0.788485616405664f,
    0.418092273222212f, -0.040689417609558f, -0.064538882628938f
};

static const float synthesis_high[9] = {
    -0.037828455506995f, -0.023849465019380f, 0.110624404418423f, 0.377402855612654f,
    -0.852698679009403f, 0.377402855612654f, 0.110624404418423f, -0.023849465019380f, -0.037828455506995f
};
  91. static int query_formats(AVFilterContext *ctx)
  92. {
  93. static const enum AVPixelFormat pix_fmts[] = {
  94. AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10,
  95. AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
  96. AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
  97. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
  98. AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
  99. AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
  100. AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
  101. AV_PIX_FMT_YUVJ411P,
  102. AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
  103. AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
  104. AV_PIX_FMT_YUV440P10,
  105. AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
  106. AV_PIX_FMT_YUV440P12,
  107. AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
  108. AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
  109. AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
  110. AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
  111. AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
  112. AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
  113. AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
  114. AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
  115. AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
  116. AV_PIX_FMT_NONE
  117. };
  118. AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  119. if (!fmts_list)
  120. return AVERROR(ENOMEM);
  121. return ff_set_common_formats(ctx, fmts_list);
  122. }
  123. static int config_input(AVFilterLink *inlink)
  124. {
  125. VagueDenoiserContext *s = inlink->dst->priv;
  126. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  127. int p, i, nsteps_width, nsteps_height, nsteps_max;
  128. s->depth = desc->comp[0].depth;
  129. s->bpc = (s->depth + 7) / 8;
  130. s->nb_planes = desc->nb_components;
  131. s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
  132. s->planeheight[0] = s->planeheight[3] = inlink->h;
  133. s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
  134. s->planewidth[0] = s->planewidth[3] = inlink->w;
  135. s->block = av_malloc_array(inlink->w * inlink->h, sizeof(*s->block));
  136. s->in = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->in));
  137. s->out = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->out));
  138. s->tmp = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->tmp));
  139. if (!s->block || !s->in || !s->out || !s->tmp)
  140. return AVERROR(ENOMEM);
  141. s->threshold *= 1 << (s->depth - 8);
  142. s->peak = (1 << s->depth) - 1;
  143. nsteps_width = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planewidth[1] : s->planewidth[0];
  144. nsteps_height = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planeheight[1] : s->planeheight[0];
  145. for (nsteps_max = 1; nsteps_max < 15; nsteps_max++) {
  146. if (pow(2, nsteps_max) >= nsteps_width || pow(2, nsteps_max) >= nsteps_height)
  147. break;
  148. }
  149. s->nsteps = FFMIN(s->nsteps, nsteps_max - 2);
  150. for (p = 0; p < 4; p++) {
  151. s->hlowsize[p][0] = (s->planewidth[p] + 1) >> 1;
  152. s->hhighsize[p][0] = s->planewidth[p] >> 1;
  153. s->vlowsize[p][0] = (s->planeheight[p] + 1) >> 1;
  154. s->vhighsize[p][0] = s->planeheight[p] >> 1;
  155. for (i = 1; i < s->nsteps; i++) {
  156. s->hlowsize[p][i] = (s->hlowsize[p][i - 1] + 1) >> 1;
  157. s->hhighsize[p][i] = s->hlowsize[p][i - 1] >> 1;
  158. s->vlowsize[p][i] = (s->vlowsize[p][i - 1] + 1) >> 1;
  159. s->vhighsize[p][i] = s->vlowsize[p][i - 1] >> 1;
  160. }
  161. }
  162. return 0;
  163. }
  164. static inline void copy(const float *p1, float *p2, const int length)
  165. {
  166. memcpy(p2, p1, length * sizeof(float));
  167. }
  168. static inline void copyv(const float *p1, const int stride1, float *p2, const int length)
  169. {
  170. int i;
  171. for (i = 0; i < length; i++) {
  172. p2[i] = *p1;
  173. p1 += stride1;
  174. }
  175. }
  176. static inline void copyh(const float *p1, float *p2, const int stride2, const int length)
  177. {
  178. int i;
  179. for (i = 0; i < length; i++) {
  180. *p2 = p1[i];
  181. p2 += stride2;
  182. }
  183. }
// Do symmetric extension of data using prescribed symmetries
// Original values are in output[npad] through output[npad+size-1]
// New values will be placed in output[0] through output[npad] and in
// output[npad+size] through output[2*npad+size-1] (note: end values may not be filled in)
// extension at left bdry is ... 3 2 1 0 | 0 1 2 3 ...
// same for right boundary
// if right_ext=1 then ... 3 2 1 0 | 1 2 3
static void symmetric_extension(float *output, const int size, const int left_ext, const int right_ext)
{
    int first = NPAD;
    int last = NPAD - 1 + size;
    const int originalLast = last;
    int i, nextend, idx;

    /* ext == 2: duplicate the boundary sample once before mirroring. */
    if (left_ext == 2)
        output[--first] = output[NPAD];
    if (right_ext == 2)
        output[++last] = output[originalLast];

    // extend left end
    nextend = first;
    for (i = 0; i < nextend; i++)
        output[--first] = output[NPAD + 1 + i];

    idx = NPAD + NPAD - 1 + size;

    // extend right end: mirror inward from the original last sample
    nextend = idx - last;
    for (i = 0; i < nextend; i++)
        output[++last] = output[originalLast - 1 - i];
}
/* One forward (analysis) step on a single padded line.
 * `input` holds `size` samples starting at input + NPAD; the low band is
 * written to output[NPAD .. NPAD+low_size-1] and the high band directly
 * after it. The hard-coded offsets assume NPAD == 10: e.g. 2*i - 14 is
 * 2*(i - NPAD) + NPAD - 4, i.e. four taps left of the filter centre.
 * `s` is currently unused; kept for a uniform step signature. */
static void transform_step(float *input, float *output, const int size, const int low_size, VagueDenoiserContext *s)
{
    int i;

    symmetric_extension(input, size, 1, 1);

    /* 9-tap symmetric low-pass: taps mirror around analysis_low[4]. */
    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 14] * analysis_low[0];
        const float b = input[2 * i - 13] * analysis_low[1];
        const float c = input[2 * i - 12] * analysis_low[2];
        const float d = input[2 * i - 11] * analysis_low[3];
        const float e = input[2 * i - 10] * analysis_low[4];
        const float f = input[2 * i -  9] * analysis_low[3];
        const float g = input[2 * i -  8] * analysis_low[2];
        const float h = input[2 * i -  7] * analysis_low[1];
        const float k = input[2 * i -  6] * analysis_low[0];

        output[i] = a + b + c + d + e + f + g + h + k;
    }

    /* 7-tap symmetric high-pass: taps mirror around analysis_high[3]. */
    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 12] * analysis_high[0];
        const float b = input[2 * i - 11] * analysis_high[1];
        const float c = input[2 * i - 10] * analysis_high[2];
        const float d = input[2 * i -  9] * analysis_high[3];
        const float e = input[2 * i -  8] * analysis_high[2];
        const float f = input[2 * i -  7] * analysis_high[1];
        const float g = input[2 * i -  6] * analysis_high[0];

        output[i + low_size] = a + b + c + d + e + f + g;
    }
}
/* One inverse (synthesis) step on a single padded line: `input` holds the
 * low band at input+NPAD followed by the high band; the reconstructed
 * `size` samples are accumulated into output[NPAD..]. `temp` is scratch.
 * Loop bounds 9 and 8 are NPAD-1 / NPAD-2; the scattered output offsets
 * are hard-coded for NPAD == 10. `s` is unused; kept for uniformity. */
static void invert_step(const float *input, float *output, float *temp, const int size, VagueDenoiserContext *s)
{
    const int low_size = (size + 1) >> 1;
    const int high_size = size >> 1;
    int left_ext = 1, right_ext, i;
    int findex;

    /* Upsample-and-filter the low band first. */
    memcpy(temp + NPAD, input + NPAD, low_size * sizeof(float));

    right_ext = (size % 2 == 0) ? 2 : 1;
    symmetric_extension(temp, low_size, left_ext, right_ext);

    memset(output, 0, (NPAD + NPAD + size) * sizeof(float));
    findex = (size + 2) >> 1;

    /* 7-tap synthesis low-pass: each low coefficient scatters into seven
     * outputs, taps mirrored around synthesis_low[3]. */
    for (i = 9; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_low[0];
        const float b = temp[i] * synthesis_low[1];
        const float c = temp[i] * synthesis_low[2];
        const float d = temp[i] * synthesis_low[3];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i -  9] += c;
        output[2 * i -  8] += b;
        output[2 * i -  7] += a;
    }

    /* Then add the contribution of the high band. */
    memcpy(temp + NPAD, input + NPAD + low_size, high_size * sizeof(float));

    left_ext = 2;
    right_ext = (size % 2 == 0) ? 1 : 2;
    symmetric_extension(temp, high_size, left_ext, right_ext);

    /* 9-tap synthesis high-pass, mirrored around synthesis_high[4]. */
    for (i = 8; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_high[0];
        const float b = temp[i] * synthesis_high[1];
        const float c = temp[i] * synthesis_high[2];
        const float d = temp[i] * synthesis_high[3];
        const float e = temp[i] * synthesis_high[4];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i -  9] += e;
        output[2 * i -  8] += d;
        output[2 * i -  7] += c;
        output[2 * i -  6] += b;
        output[2 * i -  5] += a;
    }
}
  282. static void hard_thresholding(float *block, const int width, const int height,
  283. const int stride, const float threshold,
  284. const float percent)
  285. {
  286. const float frac = 1.f - percent * 0.01f;
  287. int y, x;
  288. for (y = 0; y < height; y++) {
  289. for (x = 0; x < width; x++) {
  290. if (FFABS(block[x]) <= threshold)
  291. block[x] *= frac;
  292. }
  293. block += stride;
  294. }
  295. }
  296. static void soft_thresholding(float *block, const int width, const int height, const int stride,
  297. const float threshold, const float percent)
  298. {
  299. const float frac = 1.f - percent * 0.01f;
  300. const float shift = threshold * 0.01f * percent;
  301. int y, x;
  302. for (y = 0; y < height; y++) {
  303. for (x = 0; x < width; x++) {
  304. const float temp = FFABS(block[x]);
  305. if (temp <= threshold)
  306. block[x] *= frac;
  307. else
  308. block[x] = (block[x] < 0.f ? -1.f : (block[x] > 0.f ? 1.f : 0.f)) * (temp - shift);
  309. }
  310. block += stride;
  311. }
  312. }
  313. static void qian_thresholding(float *block, const int width, const int height,
  314. const int stride, const float threshold,
  315. const float percent)
  316. {
  317. const float percent01 = percent * 0.01f;
  318. const float tr2 = threshold * threshold * percent01;
  319. const float frac = 1.f - percent01;
  320. int y, x;
  321. for (y = 0; y < height; y++) {
  322. for (x = 0; x < width; x++) {
  323. const float temp = FFABS(block[x]);
  324. if (temp <= threshold) {
  325. block[x] *= frac;
  326. } else {
  327. const float tp2 = temp * temp;
  328. block[x] *= (tp2 - tr2) / tp2;
  329. }
  330. }
  331. block += stride;
  332. }
  333. }
  334. static float bayes_threshold(float *block, const int width, const int height,
  335. const int stride, const float threshold)
  336. {
  337. float mean = 0.f;
  338. for (int y = 0; y < height; y++) {
  339. for (int x = 0; x < width; x++) {
  340. mean += block[x] * block[x];
  341. }
  342. block += stride;
  343. }
  344. mean /= width * height;
  345. return threshold * threshold / (FFMAX(sqrtf(mean - threshold), FLT_EPSILON));
  346. }
/* Denoise one frame: for every selected plane, load it into the float
 * working buffer, run `nsteps` levels of a separable forward wavelet
 * transform, shrink the coefficients, run the inverse transform, and
 * store the result clipped to the valid pixel range. Unselected planes
 * are copied through untouched. `in` and `out` may be the same frame. */
static void filter(VagueDenoiserContext *s, AVFrame *in, AVFrame *out)
{
    int p, y, x, i, j;

    for (p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const uint8_t *srcp8 = in->data[p];
        const uint16_t *srcp16 = (const uint16_t *)in->data[p];
        uint8_t *dstp8 = out->data[p];
        uint16_t *dstp16 = (uint16_t *)out->data[p];
        float *output = s->block;
        int h_low_size0 = width;
        int v_low_size0 = height;
        int nsteps_transform = s->nsteps;
        int nsteps_invert = s->nsteps;
        const float *input = s->block;

        /* Plane not selected by the "planes" bitmask: pass through. */
        if (!((1 << p) & s->planes)) {
            av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p],
                                s->planewidth[p] * s->bpc, s->planeheight[p]);
            continue;
        }

        /* Load the plane into the float buffer (row stride == width). */
        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp8[x];
                srcp8 += in->linesize[p];
                output += width;
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp16[x];
                srcp16 += in->linesize[p] / 2;  /* linesize is in bytes */
                output += width;
            }
        }

        /* Forward transform: per level, filter all rows of the current
         * low-pass area, then all of its columns, then halve the area. */
        while (nsteps_transform--) {
            int low_size = (h_low_size0 + 1) >> 1;
            float *input = s->block;  /* intentionally shadows the outer `input` */

            for (j = 0; j < v_low_size0; j++) {
                copy(input, s->in + NPAD, h_low_size0);
                transform_step(s->in, s->out, h_low_size0, low_size, s);
                copy(s->out + NPAD, input, h_low_size0);
                input += width;
            }

            low_size = (v_low_size0 + 1) >> 1;
            input = s->block;
            for (j = 0; j < h_low_size0; j++) {
                copyv(input, width, s->in + NPAD, v_low_size0);
                transform_step(s->in, s->out, v_low_size0, low_size, s);
                copyh(s->out + NPAD, input, width, v_low_size0);
                input++;
            }

            h_low_size0 = (h_low_size0 + 1) >> 1;
            v_low_size0 = (v_low_size0 + 1) >> 1;
        }

        if (s->type == 0) {
            /* Universal (VisuShrink): one threshold over the whole plane. */
            s->thresholding(s->block, width, height, width, s->threshold, s->percent);
        } else {
            /* BayesShrink: estimate a threshold per subband per level. */
            for (int n = 0; n < s->nsteps; n++) {
                float threshold;
                float *block;

                /* At the coarsest level also shrink the approximation band. */
                if (n == s->nsteps - 1) {
                    threshold = bayes_threshold(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, s->threshold);
                    s->thresholding(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);
                }

                /* Horizontal-detail band. */
                block = s->block + s->hlowsize[p][n];
                threshold = bayes_threshold(block, s->hhighsize[p][n], s->vlowsize[p][n], width, s->threshold);
                s->thresholding(block, s->hhighsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);

                /* Vertical-detail band. */
                block = s->block + s->vlowsize[p][n] * width;
                threshold = bayes_threshold(block, s->hlowsize[p][n], s->vhighsize[p][n], width, s->threshold);
                s->thresholding(block, s->hlowsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);

                /* Diagonal-detail band. */
                block = s->block + s->hlowsize[p][n] + s->vlowsize[p][n] * width;
                threshold = bayes_threshold(block, s->hhighsize[p][n], s->vhighsize[p][n], width, s->threshold);
                s->thresholding(block, s->hhighsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);
            }
        }

        /* Inverse transform: columns first, then rows, coarsest level first. */
        while (nsteps_invert--) {
            const int idx = s->vlowsize[p][nsteps_invert] + s->vhighsize[p][nsteps_invert];
            const int idx2 = s->hlowsize[p][nsteps_invert] + s->hhighsize[p][nsteps_invert];
            float * idx3 = s->block;

            for (i = 0; i < idx2; i++) {
                copyv(idx3, width, s->in + NPAD, idx);
                invert_step(s->in, s->out, s->tmp, idx, s);
                copyh(s->out + NPAD, idx3, width, idx);
                idx3++;
            }

            idx3 = s->block;
            for (i = 0; i < idx; i++) {
                copy(idx3, s->in + NPAD, idx2);
                invert_step(s->in, s->out, s->tmp, idx2, s);
                copy(s->out + NPAD, idx3, idx2);
                idx3 += width;
            }
        }

        /* Store back with round-to-nearest (+0.5) and clip to [0, peak]. */
        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp8[x] = av_clip_uint8(input[x] + 0.5f);
                input += width;
                dstp8 += out->linesize[p];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp16[x] = av_clip(input[x] + 0.5f, 0, s->peak);
                input += width;
                dstp16 += out->linesize[p] / 2;
            }
        }
    }
}
  459. static int filter_frame(AVFilterLink *inlink, AVFrame *in)
  460. {
  461. AVFilterContext *ctx = inlink->dst;
  462. VagueDenoiserContext *s = ctx->priv;
  463. AVFilterLink *outlink = ctx->outputs[0];
  464. AVFrame *out;
  465. int direct = av_frame_is_writable(in);
  466. if (direct) {
  467. out = in;
  468. } else {
  469. out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
  470. if (!out) {
  471. av_frame_free(&in);
  472. return AVERROR(ENOMEM);
  473. }
  474. av_frame_copy_props(out, in);
  475. }
  476. filter(s, in, out);
  477. if (!direct)
  478. av_frame_free(&in);
  479. return ff_filter_frame(outlink, out);
  480. }
  481. static av_cold int init(AVFilterContext *ctx)
  482. {
  483. VagueDenoiserContext *s = ctx->priv;
  484. switch (s->method) {
  485. case 0:
  486. s->thresholding = hard_thresholding;
  487. break;
  488. case 1:
  489. s->thresholding = soft_thresholding;
  490. break;
  491. case 2:
  492. s->thresholding = qian_thresholding;
  493. break;
  494. }
  495. return 0;
  496. }
  497. static av_cold void uninit(AVFilterContext *ctx)
  498. {
  499. VagueDenoiserContext *s = ctx->priv;
  500. av_freep(&s->block);
  501. av_freep(&s->in);
  502. av_freep(&s->out);
  503. av_freep(&s->tmp);
  504. }
/* Single video input; config_input runs at link negotiation time. */
static const AVFilterPad vaguedenoiser_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single video output with default properties. */
static const AVFilterPad vaguedenoiser_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO
    },
    { NULL }
};

AVFilter ff_vf_vaguedenoiser = {
    .name          = "vaguedenoiser",
    .description   = NULL_IF_CONFIG_SMALL("Apply a Wavelet based Denoiser."),
    .priv_size     = sizeof(VagueDenoiserContext),
    .priv_class    = &vaguedenoiser_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vaguedenoiser_inputs,
    .outputs       = vaguedenoiser_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};