/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "libavcodec/avfft.h"

#include "avfilter.h"
#include "formats.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

#define MAX_THREADS 16
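/* Per-filter state: one forward and one inverse FFT context per plane and
 * worker thread, plus n x n complex buffers holding the row- and
 * column-transformed data of the main input and of the impulse. */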
typedef struct ConvolveContext {
    const AVClass *class;
    FFFrameSync fs;

    FFTContext *fft[4][MAX_THREADS];
    FFTContext *ifft[4][MAX_THREADS];

    int fft_bits[4];
    int fft_len[4];
    int planewidth[4];
    int planeheight[4];

    FFTComplex *fft_hdata[4];
    FFTComplex *fft_vdata[4];
    FFTComplex *fft_hdata_impulse[4];
    FFTComplex *fft_vdata_impulse[4];

    int depth;
    int planes;
    int impulse;
    float noise;
    int nb_planes;
    int got_impulse[4];

    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ConvolveContext;
#define OFFSET(x) offsetof(ConvolveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption convolve_options[] = {
    { "planes",  "set planes to convolve",                  OFFSET(planes),  AV_OPT_TYPE_INT,   {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses",                OFFSET(impulse), AV_OPT_TYPE_INT,   {.i64=1}, 0,  1, FLAGS, "impulse" },
    {   "first", "process only first impulse, ignore rest", 0,               AV_OPT_TYPE_CONST, {.i64=0}, 0,  0, FLAGS, "impulse" },
    {   "all",   "process all impulses",                    0,               AV_OPT_TYPE_CONST, {.i64=1}, 0,  0, FLAGS, "impulse" },
    { "noise",   "set noise",                               OFFSET(noise),   AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_fftfilt);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
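/* Pick the per-plane FFT size: the smallest power of two not smaller than
 * max(plane width, plane height), and allocate the n x n complex work
 * buffers for each plane. */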
static int config_input_main(AVFilterLink *inlink)
{
    ConvolveContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int fft_bits, i;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = desc->nb_components;
    s->depth = desc->comp[0].depth;

    for (i = 0; i < s->nb_planes; i++) {
        int w = s->planewidth[i];
        int h = s->planeheight[i];
        int n = FFMAX(w, h);

        for (fft_bits = 1; 1 << fft_bits < n; fft_bits++);

        s->fft_bits[i] = fft_bits;
        s->fft_len[i] = 1 << s->fft_bits[i];

        if (!(s->fft_hdata[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);

        if (!(s->fft_hdata_impulse[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);

        if (!(s->fft_vdata_impulse[i] = av_calloc(s->fft_len[i], s->fft_len[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);
    }

    return 0;
}
static int config_input_impulse(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}
typedef struct ThreadData {
    FFTComplex *hdata, *vdata;
    int plane, n;
} ThreadData;
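/* Forward FFT of each row in this job's slice of the n x n buffer. */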
static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        av_fft_permute(s->fft[plane][jobnr], hdata + y * n);
        av_fft_calc(s->fft[plane][jobnr], hdata + y * n);
    }

    return 0;
}
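/* Copy one plane of the input frame, scaled by 'scale', into the centre of
 * the n x n complex buffer, padding the borders by replicating edge samples. */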
static void get_input(ConvolveContext *s, FFTComplex *fft_hdata,
                      AVFrame *in, int w, int h, int n, int plane, float scale)
{
    const int iw = (n - w) / 2, ih = (n - h) / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < h; y++) {
            const uint8_t *src = in->data[plane] + in->linesize[plane] * y;

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    } else {
        for (y = 0; y < h; y++) {
            const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y);

            for (x = 0; x < w; x++) {
                fft_hdata[(y + ih) * n + iw + x].re = src[x] * scale;
                fft_hdata[(y + ih) * n + iw + x].im = 0;
            }

            for (x = 0; x < iw; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + iw].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }

            for (x = n - iw; x < n; x++) {
                fft_hdata[(y + ih) * n + x].re = fft_hdata[(y + ih) * n + n - iw - 1].re;
                fft_hdata[(y + ih) * n + x].im = 0;
            }
        }

        for (y = 0; y < ih; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[ih * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }

        for (y = n - ih; y < n; y++) {
            for (x = 0; x < n; x++) {
                fft_hdata[y * n + x].re = fft_hdata[(n - ih - 1) * n + x].re;
                fft_hdata[y * n + x].im = 0;
            }
        }
    }
}
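/* Transpose the row-FFT output into the column buffer and run the FFT on
 * each column of this job's slice, completing the 2D forward transform. */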
static int fft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    FFTComplex *vdata = td->vdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        for (x = 0; x < n; x++) {
            vdata[y * n + x].re = hdata[x * n + y].re;
            vdata[y * n + x].im = hdata[x * n + y].im;
        }

        av_fft_permute(s->fft[plane][jobnr], vdata + y * n);
        av_fft_calc(s->fft[plane][jobnr], vdata + y * n);
    }

    return 0;
}
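/* Inverse of fft_vertical: inverse-FFT each column of the slice, then
 * transpose the result back into the row buffer. */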
static int ifft_vertical(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    FFTComplex *vdata = td->vdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        av_fft_permute(s->ifft[plane][jobnr], vdata + y * n);
        av_fft_calc(s->ifft[plane][jobnr], vdata + y * n);

        for (x = 0; x < n; x++) {
            hdata[x * n + y].re = vdata[y * n + x].re;
            hdata[x * n + y].im = vdata[y * n + x].im;
        }
    }

    return 0;
}
static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *hdata = td->hdata;
    const int plane = td->plane;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y;

    for (y = start; y < end; y++) {
        av_fft_permute(s->ifft[plane][jobnr], hdata + y * n);
        av_fft_calc(s->ifft[plane][jobnr], hdata + y * n);
    }

    return 0;
}
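/* Write the real part of the inverse transform back into the frame,
 * swapping quadrants to compensate for the shift introduced by centering
 * the input and impulse, and clip to the valid pixel range. */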
static void get_output(ConvolveContext *s, FFTComplex *input, AVFrame *out,
                       int w, int h, int n, int plane, float scale)
{
    const int max = (1 << s->depth) - 1;
    const int hh = h / 2;
    const int hw = w / 2;
    int y, x;

    if (s->depth == 8) {
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + (y + hh) * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[y * n + n - hw + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane] + hw;
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + x].re * scale);
        }
        for (y = 0; y < hh; y++) {
            uint8_t *dst = out->data[plane] + y * out->linesize[plane];
            for (x = 0; x < hw; x++)
                dst[x] = av_clip_uint8(input[(n - hh + y) * n + n - hw + x].re * scale);
        }
    } else {
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + (y + hh) * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[y * n + n - hw + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane] + hw * 2);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + x].re * scale, 0, max);
        }
        for (y = 0; y < hh; y++) {
            uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]);
            for (x = 0; x < hw; x++)
                dst[x] = av_clip(input[(n - hh + y) * n + n - hw + x].re * scale, 0, max);
        }
    }
}
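/* Pointwise complex multiplication of the two spectra (convolution in the
 * spatial domain); 'noise' is added to the real part of the filter term. */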
static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *input = td->hdata;
    FFTComplex *filter = td->vdata;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            FFTSample re, im, ire, iim;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re + noise;
            iim = filter[yn + x].im;

            input[yn + x].re = ire * re - iim * im;
            input[yn + x].im = iim * re + ire * im;
        }
    }

    return 0;
}
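/* Pointwise complex division of the input spectrum by the filter spectrum
 * (deconvolution), regularized by adding 'noise' to the denominator. */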
static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolveContext *s = ctx->priv;
    ThreadData *td = arg;
    FFTComplex *input = td->hdata;
    FFTComplex *filter = td->vdata;
    const float noise = s->noise;
    const int n = td->n;
    int start = (n * jobnr) / nb_jobs;
    int end = (n * (jobnr+1)) / nb_jobs;
    int y, x;

    for (y = start; y < end; y++) {
        int yn = y * n;

        for (x = 0; x < n; x++) {
            FFTSample re, im, ire, iim, div;

            re = input[yn + x].re;
            im = input[yn + x].im;
            ire = filter[yn + x].re;
            iim = filter[yn + x].im;
            div = ire * ire + iim * iim + noise;

            input[yn + x].re = (ire * re + iim * im) / div;
            input[yn + x].im = (ire * im - iim * re) / div;
        }
    }

    return 0;
}
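/* Framesync callback: forward-transform the selected planes of the main and
 * impulse frames, apply the pointwise filter, inverse-transform and write
 * the result back into the main frame. The impulse is normalized by the sum
 * of its samples; with impulse=first it is transformed only once per plane. */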
static int do_convolve(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    ConvolveContext *s = ctx->priv;
    AVFrame *mainpic = NULL, *impulsepic = NULL;
    int ret, y, x, plane;

    ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
    if (ret < 0)
        return ret;
    if (!impulsepic)
        return ff_filter_frame(outlink, mainpic);

    for (plane = 0; plane < s->nb_planes; plane++) {
        FFTComplex *filter = s->fft_vdata_impulse[plane];
        FFTComplex *input = s->fft_vdata[plane];
        const int n = s->fft_len[plane];
        const int w = s->planewidth[plane];
        const int h = s->planeheight[plane];
        float total = 0;
        ThreadData td;

        if (!(s->planes & (1 << plane))) {
            continue;
        }

        td.plane = plane, td.n = n;
        get_input(s, s->fft_hdata[plane], mainpic, w, h, n, plane, 1.f);
        td.hdata = s->fft_hdata[plane];
        td.vdata = s->fft_vdata[plane];
        ctx->internal->execute(ctx, fft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
        ctx->internal->execute(ctx, fft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) {
            if (s->depth == 8) {
                for (y = 0; y < h; y++) {
                    const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
                    for (x = 0; x < w; x++) {
                        total += src[x];
                    }
                }
            } else {
                for (y = 0; y < h; y++) {
                    const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]);
                    for (x = 0; x < w; x++) {
                        total += src[x];
                    }
                }
            }
            total = FFMAX(1, total);

            get_input(s, s->fft_hdata_impulse[plane], impulsepic, w, h, n, plane, 1.f / total);

            td.hdata = s->fft_hdata_impulse[plane];
            td.vdata = s->fft_vdata_impulse[plane];
            ctx->internal->execute(ctx, fft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
            ctx->internal->execute(ctx, fft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

            s->got_impulse[plane] = 1;
        }

        td.hdata = input;
        td.vdata = filter;

        ctx->internal->execute(ctx, s->filter, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        td.hdata = s->fft_hdata[plane];
        td.vdata = s->fft_vdata[plane];

        ctx->internal->execute(ctx, ifft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
        ctx->internal->execute(ctx, ifft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));

        get_output(s, s->fft_hdata[plane], mainpic, w, h, n, plane, 1.f / (n * n));
    }

    return ff_filter_frame(outlink, mainpic);
}
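/* Configure the output like the main input and create one forward and one
 * inverse FFT context per plane and per worker thread. */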
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ConvolveContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret, i, j;

    s->fs.on_event = do_convolve;
    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;

    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    for (i = 0; i < s->nb_planes; i++) {
        for (j = 0; j < MAX_THREADS; j++) {
            s->fft[i][j] = av_fft_init(s->fft_bits[i], 0);
            s->ifft[i][j] = av_fft_init(s->fft_bits[i], 1);
            if (!s->fft[i][j] || !s->ifft[i][j])
                return AVERROR(ENOMEM);
        }
    }

    return 0;
}
static int activate(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}
static av_cold int init(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;

    if (!strcmp(ctx->filter->name, "convolve")) {
        s->filter = complex_multiply;
    } else if (!strcmp(ctx->filter->name, "deconvolve")) {
        s->filter = complex_divide;
    } else {
        return AVERROR_BUG;
    }

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    int i, j;

    for (i = 0; i < 4; i++) {
        av_freep(&s->fft_hdata[i]);
        av_freep(&s->fft_vdata[i]);
        av_freep(&s->fft_hdata_impulse[i]);
        av_freep(&s->fft_vdata_impulse[i]);

        for (j = 0; j < MAX_THREADS; j++) {
            av_fft_end(s->fft[i][j]);
            s->fft[i][j] = NULL;
            av_fft_end(s->ifft[i][j]);
            s->ifft[i][j] = NULL;
        }
    }

    ff_framesync_uninit(&s->fs);
}
static const AVFilterPad convolve_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_main,
    },{
        .name         = "impulse",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_impulse,
    },
    { NULL }
};
static const AVFilterPad convolve_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};
#if CONFIG_CONVOLVE_FILTER

FRAMESYNC_DEFINE_CLASS(convolve, ConvolveContext, fs);

AVFilter ff_vf_convolve = {
    .name          = "convolve",
    .description   = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
    .preinit       = convolve_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(ConvolveContext),
    .priv_class    = &convolve_class,
    .inputs        = convolve_inputs,
    .outputs       = convolve_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_CONVOLVE_FILTER */
#if CONFIG_DECONVOLVE_FILTER

static const AVOption deconvolve_options[] = {
    { "planes",  "set planes to deconvolve",                OFFSET(planes),  AV_OPT_TYPE_INT,   {.i64=7}, 0, 15, FLAGS },
    { "impulse", "when to process impulses",                OFFSET(impulse), AV_OPT_TYPE_INT,   {.i64=1}, 0,  1, FLAGS, "impulse" },
    {   "first", "process only first impulse, ignore rest", 0,               AV_OPT_TYPE_CONST, {.i64=0}, 0,  0, FLAGS, "impulse" },
    {   "all",   "process all impulses",                    0,               AV_OPT_TYPE_CONST, {.i64=1}, 0,  0, FLAGS, "impulse" },
    { "noise",   "set noise",                               OFFSET(noise),   AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
    { NULL },
};

FRAMESYNC_DEFINE_CLASS(deconvolve, ConvolveContext, fs);

AVFilter ff_vf_deconvolve = {
    .name          = "deconvolve",
    .description   = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
    .preinit       = deconvolve_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(ConvolveContext),
    .priv_class    = &deconvolve_class,
    .inputs        = convolve_inputs,
    .outputs       = convolve_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_DECONVOLVE_FILTER */