You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1150 lines
39KB

  1. /*
  2. * Copyright (c) 2012-2013 Oka Motofumi (chikuzen.mo at gmail dot com)
  3. * Copyright (c) 2015 Paul B Mahol
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/avstring.h"
  22. #include "libavutil/imgutils.h"
  23. #include "libavutil/opt.h"
  24. #include "libavutil/pixdesc.h"
  25. #include "avfilter.h"
  26. #include "formats.h"
  27. #include "internal.h"
  28. #include "video.h"
/* Per-instance state shared by the convolution / prewitt / roberts / sobel
 * filters, which are all implemented in this file on top of the same
 * per-thread line buffers. */
typedef struct ConvolutionContext {
    const AVClass *class;
    char *matrix_str[4];  // user-supplied kernel strings, one per plane ("0m".."3m")
    float rdiv[4];        // reciprocal divisor multiplied into each plane's convolution sum
    float bias[4];        // bias added to each plane's result after the rdiv scaling
    float scale;          // output scale for the edge-detector variants (prewitt/roberts/sobel)
    float delta;          // output offset for the edge-detector variants
    int planes;           // plane-selection option; not referenced in the visible code — TODO confirm semantics
    int size[4];          // kernel edge length per plane: 3, 5 or 7
    int depth;            // bits per component of the input format
    int bpc;              // bytes per component (1 or 2), derived from depth
    int bstride;          // stride, in components, of one padded scratch line
    uint8_t *buffer;      // backing storage for all threads' scratch lines
    uint8_t **bptrs;      // per-thread pointers into buffer (7 lines each)
    int nb_planes;        // number of planes in the input format
    int nb_threads;       // number of slice-threading jobs
    int planewidth[4];    // per-plane width (chroma may be subsampled)
    int planeheight[4];   // per-plane height (chroma may be subsampled)
    int matrix[4][49];    // parsed integer kernel per plane, up to 7x7 taps
    int matrix_length[4]; // number of coefficients parsed for each plane
    int copy[4];          // nonzero when the plane's kernel is an identity -> plain copy
    /* per-plane slice worker invoked through ctx->internal->execute() */
    int (*filter[4])(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ConvolutionContext;
#define OFFSET(x) offsetof(ConvolutionContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Options of the "convolution" filter: for each of up to four planes a
 * matrix string ("Nm"), a reciprocal divisor ("Nrdiv") and a bias ("Nbias").
 * The default matrix is the 3x3 identity kernel, i.e. the plane is passed
 * through unchanged. */
static const AVOption convolution_options[] = {
    { "0m", "set matrix for 1st plane", OFFSET(matrix_str[0]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "1m", "set matrix for 2nd plane", OFFSET(matrix_str[1]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "2m", "set matrix for 3rd plane", OFFSET(matrix_str[2]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "3m", "set matrix for 4th plane", OFFSET(matrix_str[3]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "0rdiv", "set rdiv for 1st plane", OFFSET(rdiv[0]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
    { "1rdiv", "set rdiv for 2nd plane", OFFSET(rdiv[1]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
    { "2rdiv", "set rdiv for 3rd plane", OFFSET(rdiv[2]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
    { "3rdiv", "set rdiv for 4th plane", OFFSET(rdiv[3]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS},
    { "0bias", "set bias for 1st plane", OFFSET(bias[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
    { "1bias", "set bias for 2nd plane", OFFSET(bias[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
    { "2bias", "set bias for 3rd plane", OFFSET(bias[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
    { "3bias", "set bias for 4th plane", OFFSET(bias[3]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS},
    { NULL }
};

AVFILTER_DEFINE_CLASS(convolution);
/* Identity kernels: a user matrix equal to one of these makes the filter a
 * no-op for that plane, so it is copied instead of convolved (see the
 * s->copy[] checks in init()). */
static const int same3x3[9] = {0, 0, 0,
                               0, 1, 0,
                               0, 0, 0};

static const int same5x5[25] = {0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0,
                                0, 0, 1, 0, 0,
                                0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0};

static const int same7x7[49] = {0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 1, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0, 0, 0};
/* Advertise the supported pixel formats: planar YUV/GBR/gray at 8 to 16 bits
 * per component.  Only planar formats are listed because the workers operate
 * on one plane at a time. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
  109. static inline void line_copy8(uint8_t *line, const uint8_t *srcp, int width, int mergin)
  110. {
  111. int i;
  112. memcpy(line, srcp, width);
  113. for (i = mergin; i > 0; i--) {
  114. line[-i] = line[i];
  115. line[width - 1 + i] = line[width - 1 - i];
  116. }
  117. }
  118. static inline void line_copy16(uint16_t *line, const uint16_t *srcp, int width, int mergin)
  119. {
  120. int i;
  121. memcpy(line, srcp, width * 2);
  122. for (i = mergin; i > 0; i--) {
  123. line[-i] = line[i];
  124. line[width - 1 + i] = line[width - 1 - i];
  125. }
  126. }
/* Job payload handed to the slice workers through ctx->internal->execute(). */
typedef struct ThreadData {
    AVFrame *in, *out;  // source and destination frames
    int plane;          // plane index processed by this job
} ThreadData;
/* Slice worker: 3x3 Prewitt edge detector on one plane of >8-bit video.
 * Three edge-mirrored row copies (p0/p1/p2) form a ring buffer so the inner
 * loop can read x-1/x+1 and the rows above/below without bounds checks. */
static int filter16_prewitt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int peak = (1 << s->depth) - 1;        // max representable sample value
    const int stride = in->linesize[plane] / 2;  // stride in 16-bit samples
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
    const float scale = s->scale;
    const float delta = s->delta;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;  // +16 leaves room for the mirrored left margin
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;
    int y, x;

    /* prime the ring: p0 = row above the slice (mirrored at the top edge),
     * p1 = first row of the slice */
    line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy16(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        /* p2 = row below the current one (mirrored at the bottom edge) */
        src += stride * (y < height - 1 ? 1 : -1);
        line_copy16(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            /* vertical Prewitt response: bottom row minus top row */
            int suma = p0[x - 1] * -1 +
                       p0[x] * -1 +
                       p0[x + 1] * -1 +
                       p2[x - 1] * 1 +
                       p2[x] * 1 +
                       p2[x + 1] * 1;
            /* horizontal Prewitt response: right column minus left column */
            int sumb = p0[x - 1] * -1 +
                       p0[x + 1] * 1 +
                       p1[x - 1] * -1 +
                       p1[x + 1] * 1 +
                       p2[x - 1] * -1 +
                       p2[x + 1] * 1;

            /* gradient magnitude, scaled/offset and clipped to sample range */
            dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
        }

        /* rotate the three-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane] / 2;
    }

    return 0;
}
/* Slice worker: 2x2 Roberts cross edge detector on one plane of >8-bit
 * video.  Same ring-buffer scheme as filter16_prewitt(). */
static int filter16_roberts(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int peak = (1 << s->depth) - 1;        // max representable sample value
    const int stride = in->linesize[plane] / 2;  // stride in 16-bit samples
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
    const float scale = s->scale;
    const float delta = s->delta;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;
    int y, x;

    /* prime the ring with the row above the slice and the first slice row */
    line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy16(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);  // mirror at the bottom edge
        line_copy16(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            /* the two diagonal differences of the Roberts cross */
            int suma = p0[x - 1] * 1 +
                       p1[x ] * -1;
            int sumb = p0[x ] * 1 +
                       p1[x - 1] * -1;

            dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
        }

        /* rotate the three-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane] / 2;
    }

    return 0;
}
/* Slice worker: 3x3 Sobel edge detector on one plane of >8-bit video.
 * Identical structure to filter16_prewitt(), only the kernel weights differ
 * (the centre taps are weighted 2 instead of 1). */
static int filter16_sobel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int peak = (1 << s->depth) - 1;        // max representable sample value
    const int stride = in->linesize[plane] / 2;  // stride in 16-bit samples
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
    const float scale = s->scale;
    const float delta = s->delta;
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;
    int y, x;

    /* prime the ring with the row above the slice and the first slice row */
    line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy16(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);  // mirror at the bottom edge
        line_copy16(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            /* vertical Sobel response */
            int suma = p0[x - 1] * -1 +
                       p0[x] * -2 +
                       p0[x + 1] * -1 +
                       p2[x - 1] * 1 +
                       p2[x] * 2 +
                       p2[x + 1] * 1;
            /* horizontal Sobel response */
            int sumb = p0[x - 1] * -1 +
                       p0[x + 1] * 1 +
                       p1[x - 1] * -2 +
                       p1[x + 1] * 2 +
                       p2[x - 1] * -1 +
                       p2[x + 1] * 1;

            dst[x] = av_clip(sqrt(suma*suma + sumb*sumb) * scale + delta, 0, peak);
        }

        /* rotate the three-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane] / 2;
    }

    return 0;
}
/* Slice worker: 3x3 Prewitt edge detector on one 8-bit plane.
 * 8-bit counterpart of filter16_prewitt(); same ring-buffer scheme. */
static int filter_prewitt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint8_t *src = in->data[plane] + slice_start * stride;
    uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
    const float scale = s->scale;
    const float delta = s->delta;
    uint8_t *p0 = s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *orig = p0, *end = p2;
    int y, x;

    /* prime the ring with the row above the slice and the first slice row */
    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy8(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);  // mirror at the bottom edge
        line_copy8(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            /* vertical Prewitt response */
            int suma = p0[x - 1] * -1 +
                       p0[x] * -1 +
                       p0[x + 1] * -1 +
                       p2[x - 1] * 1 +
                       p2[x] * 1 +
                       p2[x + 1] * 1;
            /* horizontal Prewitt response */
            int sumb = p0[x - 1] * -1 +
                       p0[x + 1] * 1 +
                       p1[x - 1] * -1 +
                       p1[x + 1] * 1 +
                       p2[x - 1] * -1 +
                       p2[x + 1] * 1;

            dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
        }

        /* rotate the three-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane];
    }

    return 0;
}
  322. static int filter_roberts(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  323. {
  324. ConvolutionContext *s = ctx->priv;
  325. ThreadData *td = arg;
  326. AVFrame *in = td->in;
  327. AVFrame *out = td->out;
  328. const int plane = td->plane;
  329. const int stride = in->linesize[plane];
  330. const int bstride = s->bstride;
  331. const int height = s->planeheight[plane];
  332. const int width = s->planewidth[plane];
  333. const int slice_start = (height * jobnr) / nb_jobs;
  334. const int slice_end = (height * (jobnr+1)) / nb_jobs;
  335. const uint8_t *src = in->data[plane] + slice_start * stride;
  336. uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
  337. const float scale = s->scale;
  338. const float delta = s->delta;
  339. uint8_t *p0 = s->bptrs[jobnr] + 16;
  340. uint8_t *p1 = p0 + bstride;
  341. uint8_t *p2 = p1 + bstride;
  342. uint8_t *orig = p0, *end = p2;
  343. int y, x;
  344. line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
  345. line_copy8(p1, src, width, 1);
  346. for (y = slice_start; y < slice_end; y++) {
  347. src += stride * (y < height - 1 ? 1 : -1);
  348. line_copy8(p2, src, width, 1);
  349. for (x = 0; x < width; x++) {
  350. int suma = p0[x - 1] * 1 +
  351. p1[x ] * -1;
  352. int sumb = p0[x ] * 1 +
  353. p1[x - 1] * -1;
  354. dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
  355. }
  356. p0 = p1;
  357. p1 = p2;
  358. p2 = (p2 == end) ? orig: p2 + bstride;
  359. dst += out->linesize[plane];
  360. }
  361. return 0;
  362. }
/* Slice worker: 3x3 Sobel edge detector on one 8-bit plane.
 * 8-bit counterpart of filter16_sobel(); same ring-buffer scheme. */
static int filter_sobel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint8_t *src = in->data[plane] + slice_start * stride;
    uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
    const float scale = s->scale;
    const float delta = s->delta;
    uint8_t *p0 = s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *orig = p0, *end = p2;
    int y, x;

    /* prime the ring with the row above the slice and the first slice row */
    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy8(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);  // mirror at the bottom edge
        line_copy8(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            /* vertical Sobel response */
            int suma = p0[x - 1] * -1 +
                       p0[x] * -2 +
                       p0[x + 1] * -1 +
                       p2[x - 1] * 1 +
                       p2[x] * 2 +
                       p2[x + 1] * 1;
            /* horizontal Sobel response */
            int sumb = p0[x - 1] * -1 +
                       p0[x + 1] * 1 +
                       p1[x - 1] * -2 +
                       p1[x + 1] * 2 +
                       p2[x - 1] * -1 +
                       p2[x + 1] * 1;

            dst[x] = av_clip_uint8(sqrt(suma*suma + sumb*sumb) * scale + delta);
        }

        /* rotate the three-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane];
    }

    return 0;
}
/* Slice worker: generic 3x3 integer convolution on one plane of >8-bit
 * video.  The result is scaled by rdiv, offset by bias, rounded and clipped
 * to the sample range. */
static int filter16_3x3(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int peak = (1 << s->depth) - 1;        // max representable sample value
    const int stride = in->linesize[plane] / 2;  // stride in 16-bit samples
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;
    const int *matrix = s->matrix[plane];  // 9 kernel taps, row-major
    const float rdiv = s->rdiv[plane];
    const float bias = s->bias[plane];
    int y, x;

    /* prime the ring with the row above the slice and the first slice row */
    line_copy16(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy16(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);  // mirror at the bottom edge
        line_copy16(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            /* weighted sum over the 3x3 neighbourhood */
            int sum = p0[x - 1] * matrix[0] +
                      p0[x] * matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x] * matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x] * matrix[7] +
                      p2[x + 1] * matrix[8];

            sum = (int)(sum * rdiv + bias + 0.5f);  // scale, bias, round
            dst[x] = av_clip(sum, 0, peak);
        }

        /* rotate the three-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane] / 2;
    }

    return 0;
}
/* Slice worker: generic 5x5 integer convolution on one plane of >8-bit
 * video.  Five edge-mirrored rows form a ring buffer; `array` caches the 25
 * tap addresses for the current window position. */
static int filter16_5x5(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int peak = (1 << s->depth) - 1;        // max representable sample value
    const int stride = in->linesize[plane] / 2;  // stride in 16-bit samples
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
    uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
    uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *p3 = p2 + bstride;
    uint16_t *p4 = p3 + bstride;
    uint16_t *orig = p0, *end = p4;
    const int *matrix = s->matrix[plane];  // 25 kernel taps, row-major
    float rdiv = s->rdiv[plane];
    float bias = s->bias[plane];
    int y, x, i;

    /* prime the ring: two rows above the slice (mirrored at the top edge),
     * the first slice row and the row after it; p4 is filled in the loop */
    line_copy16(p0, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 2);
    line_copy16(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);
    line_copy16(p2, src, width, 2);
    src += stride;
    line_copy16(p3, src, width, 2);

    for (y = slice_start; y < slice_end; y++) {
        /* addresses of the 25 taps relative to x (p4 is written below,
         * before the x loop reads it) */
        uint16_t *array[] = {
            p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
            p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
            p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
            p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
            p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
        };

        src += stride * (y < height - 2 ? 1 : -1);  // mirror at the bottom edge
        line_copy16(p4, src, width, 2);

        for (x = 0; x < width; x++) {
            int sum = 0;

            for (i = 0; i < 25; i++) {
                sum += *(array[i] + x) * matrix[i];
            }

            sum = (int)(sum * rdiv + bias + 0.5f);  // scale, bias, round
            dst[x] = av_clip(sum, 0, peak);
        }

        /* rotate the five-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = p3;
        p3 = p4;
        p4 = (p4 == end) ? orig: p4 + bstride;
        dst += out->linesize[plane] / 2;
    }

    return 0;
}
  519. static int filter16_7x7(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  520. {
  521. ConvolutionContext *s = ctx->priv;
  522. ThreadData *td = arg;
  523. AVFrame *in = td->in;
  524. AVFrame *out = td->out;
  525. const int plane = td->plane;
  526. const int peak = (1 << s->depth) - 1;
  527. const int stride = in->linesize[plane] / 2;
  528. const int bstride = s->bstride;
  529. const int height = s->planeheight[plane];
  530. const int width = s->planewidth[plane];
  531. const int slice_start = (height * jobnr) / nb_jobs;
  532. const int slice_end = (height * (jobnr+1)) / nb_jobs;
  533. const uint16_t *src = (const uint16_t *)in->data[plane] + slice_start * stride;
  534. uint16_t *dst = (uint16_t *)out->data[plane] + slice_start * (out->linesize[plane] / 2);
  535. uint16_t *p0 = (uint16_t *)s->bptrs[jobnr] + 32;
  536. uint16_t *p1 = p0 + bstride;
  537. uint16_t *p2 = p1 + bstride;
  538. uint16_t *p3 = p2 + bstride;
  539. uint16_t *p4 = p3 + bstride;
  540. uint16_t *p5 = p4 + bstride;
  541. uint16_t *p6 = p5 + bstride;
  542. uint16_t *orig = p0, *end = p6;
  543. const int *matrix = s->matrix[plane];
  544. float rdiv = s->rdiv[plane];
  545. float bias = s->bias[plane];
  546. int y, x, i;
  547. line_copy16(p0, src + 3 * stride * (slice_start < 3 ? 1 : -1), width, 3);
  548. line_copy16(p1, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 3);
  549. line_copy16(p2, src + stride * (slice_start == 0 ? 1 : -1), width, 3);
  550. line_copy16(p3, src, width, 3);
  551. src += stride;
  552. line_copy16(p4, src, width, 3);
  553. src += stride;
  554. line_copy16(p5, src, width, 3);
  555. for (y = slice_start; y < slice_end; y++) {
  556. uint16_t *array[] = {
  557. p0 - 3, p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2, p0 + 3,
  558. p1 - 3, p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2, p1 + 3,
  559. p2 - 3, p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2, p2 + 3,
  560. p3 - 3, p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2, p3 + 3,
  561. p4 - 3, p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2, p4 + 3,
  562. p5 - 3, p5 - 2, p5 - 1, p5, p5 + 1, p5 + 2, p5 + 3,
  563. p6 - 3, p6 - 2, p6 - 1, p6, p6 + 1, p6 + 2, p6 + 3,
  564. };
  565. src += stride * (y < height - 3 ? 1 : -1);
  566. line_copy16(p6, src, width, 3);
  567. for (x = 0; x < width; x++) {
  568. int sum = 0;
  569. for (i = 0; i < 25; i++) {
  570. sum += *(array[i] + x) * matrix[i];
  571. }
  572. sum = (int)(sum * rdiv + bias + 0.5f);
  573. dst[x] = av_clip(sum, 0, peak);
  574. }
  575. p0 = p1;
  576. p1 = p2;
  577. p2 = p3;
  578. p3 = p4;
  579. p4 = p5;
  580. p5 = p6;
  581. p6 = (p6 == end) ? orig: p6 + bstride;
  582. dst += out->linesize[plane] / 2;
  583. }
  584. return 0;
  585. }
/* Slice worker: generic 3x3 integer convolution on one 8-bit plane.
 * 8-bit counterpart of filter16_3x3(). */
static int filter_3x3(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint8_t *src = in->data[plane] + slice_start * stride;
    uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
    uint8_t *p0 = s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *orig = p0, *end = p2;
    const int *matrix = s->matrix[plane];  // 9 kernel taps, row-major
    const float rdiv = s->rdiv[plane];
    const float bias = s->bias[plane];
    int y, x;

    /* prime the ring with the row above the slice and the first slice row */
    line_copy8(p0, src + stride * (slice_start == 0 ? 1 : -1), width, 1);
    line_copy8(p1, src, width, 1);

    for (y = slice_start; y < slice_end; y++) {
        src += stride * (y < height - 1 ? 1 : -1);  // mirror at the bottom edge
        line_copy8(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            /* weighted sum over the 3x3 neighbourhood */
            int sum = p0[x - 1] * matrix[0] +
                      p0[x] * matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x] * matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x] * matrix[7] +
                      p2[x + 1] * matrix[8];

            sum = (int)(sum * rdiv + bias + 0.5f);  // scale, bias, round
            dst[x] = av_clip_uint8(sum);
        }

        /* rotate the three-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig: p2 + bstride;
        dst += out->linesize[plane];
    }

    return 0;
}
/* Slice worker: generic 5x5 integer convolution on one 8-bit plane.
 * 8-bit counterpart of filter16_5x5(). */
static int filter_5x5(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint8_t *src = in->data[plane] + slice_start * stride;
    uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
    uint8_t *p0 = s->bptrs[jobnr] + 16;  // +16: room for mirrored left margin
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *p3 = p2 + bstride;
    uint8_t *p4 = p3 + bstride;
    uint8_t *orig = p0, *end = p4;
    const int *matrix = s->matrix[plane];  // 25 kernel taps, row-major
    float rdiv = s->rdiv[plane];
    float bias = s->bias[plane];
    int y, x, i;

    /* prime the ring: two rows above the slice (mirrored at the top edge),
     * the first slice row and the row after it; p4 is filled in the loop */
    line_copy8(p0, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 2);
    line_copy8(p1, src + stride * (slice_start == 0 ? 1 : -1), width, 2);
    line_copy8(p2, src, width, 2);
    src += stride;
    line_copy8(p3, src, width, 2);

    for (y = slice_start; y < slice_end; y++) {
        /* addresses of the 25 taps relative to x (p4 is written below,
         * before the x loop reads it) */
        uint8_t *array[] = {
            p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
            p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
            p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
            p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
            p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
        };

        src += stride * (y < height - 2 ? 1 : -1);  // mirror at the bottom edge
        line_copy8(p4, src, width, 2);

        for (x = 0; x < width; x++) {
            int sum = 0;

            for (i = 0; i < 25; i++) {
                sum += *(array[i] + x) * matrix[i];
            }

            sum = (int)(sum * rdiv + bias + 0.5f);  // scale, bias, round
            dst[x] = av_clip_uint8(sum);
        }

        /* rotate the five-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = p3;
        p3 = p4;
        p4 = (p4 == end) ? orig: p4 + bstride;
        dst += out->linesize[plane];
    }

    return 0;
}
/* Slice worker: generic 7x7 integer convolution on one 8-bit plane.
 * 8-bit counterpart of filter16_7x7(); note it correctly sums all 49 taps. */
static int filter_7x7(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ConvolutionContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int plane = td->plane;
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;
    const uint8_t *src = in->data[plane] + slice_start * stride;
    uint8_t *dst = out->data[plane] + slice_start * out->linesize[plane];
    uint8_t *p0 = s->bptrs[jobnr] + 32;  // +32: room for the 3-sample mirrored margin
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *p3 = p2 + bstride;
    uint8_t *p4 = p3 + bstride;
    uint8_t *p5 = p4 + bstride;
    uint8_t *p6 = p5 + bstride;
    uint8_t *orig = p0, *end = p6;
    const int *matrix = s->matrix[plane];  // 49 kernel taps, row-major
    float rdiv = s->rdiv[plane];
    float bias = s->bias[plane];
    int y, x, i;

    /* prime the ring: three rows above the slice (mirrored at the top edge),
     * the first slice row and the two rows after it; p6 is filled in the loop */
    line_copy8(p0, src + 3 * stride * (slice_start < 3 ? 1 : -1), width, 3);
    line_copy8(p1, src + 2 * stride * (slice_start < 2 ? 1 : -1), width, 3);
    line_copy8(p2, src + stride * (slice_start == 0 ? 1 : -1), width, 3);
    line_copy8(p3, src, width, 3);
    src += stride;
    line_copy8(p4, src, width, 3);
    src += stride;
    line_copy8(p5, src, width, 3);

    for (y = slice_start; y < slice_end; y++) {
        /* addresses of the 49 taps relative to x (p6 is written below,
         * before the x loop reads it) */
        uint8_t *array[] = {
            p0 - 3, p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2, p0 + 3,
            p1 - 3, p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2, p1 + 3,
            p2 - 3, p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2, p2 + 3,
            p3 - 3, p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2, p3 + 3,
            p4 - 3, p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2, p4 + 3,
            p5 - 3, p5 - 2, p5 - 1, p5, p5 + 1, p5 + 2, p5 + 3,
            p6 - 3, p6 - 2, p6 - 1, p6, p6 + 1, p6 + 2, p6 + 3,
        };

        src += stride * (y < height - 3 ? 1 : -1);  // mirror at the bottom edge
        line_copy8(p6, src, width, 3);

        for (x = 0; x < width; x++) {
            int sum = 0;

            for (i = 0; i < 49; i++) {
                sum += *(array[i] + x) * matrix[i];
            }

            sum = (int)(sum * rdiv + bias + 0.5f);  // scale, bias, round
            dst[x] = av_clip_uint8(sum);
        }

        /* rotate the seven-line ring buffer */
        p0 = p1;
        p1 = p2;
        p2 = p3;
        p3 = p4;
        p4 = p5;
        p5 = p6;
        p6 = (p6 == end) ? orig: p6 + bstride;
        dst += out->linesize[plane];
    }

    return 0;
}
/* Per-link configuration: derive plane geometry from the negotiated pixel
 * format, allocate per-thread scratch lines, and select the 16-bit worker
 * functions when the format is deeper than 8 bits (the 8-bit workers are
 * installed elsewhere — presumably at init(); confirm against full file). */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ConvolutionContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p;

    s->depth = desc->comp[0].depth;

    /* chroma planes may be subsampled; alpha (plane 3) never is */
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->bptrs = av_calloc(s->nb_threads, sizeof(*s->bptrs));
    if (!s->bptrs)
        return AVERROR(ENOMEM);

    /* one padded scratch line is the widest plane plus 64 components of
     * margin; 7 lines per thread cover the largest (7x7) kernel */
    s->bstride = s->planewidth[0] + 64;
    s->bpc = (s->depth + 7) / 8;  // bytes per component
    s->buffer = av_malloc_array(7 * s->bstride * s->nb_threads, s->bpc);
    if (!s->buffer)
        return AVERROR(ENOMEM);
    for (p = 0; p < s->nb_threads; p++) {
        s->bptrs[p] = s->buffer + 7 * s->bstride * s->bpc * p;
    }

    /* for >8-bit input, swap in the 16-bit worker matching each plane's
     * kernel size / the filter variant */
    if (!strcmp(ctx->filter->name, "convolution")) {
        if (s->depth > 8) {
            for (p = 0; p < s->nb_planes; p++) {
                if (s->size[p] == 3)
                    s->filter[p] = filter16_3x3;
                else if (s->size[p] == 5)
                    s->filter[p] = filter16_5x5;
                else if (s->size[p] == 7)
                    s->filter[p] = filter16_7x7;
            }
        }
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_prewitt;
    } else if (!strcmp(ctx->filter->name, "roberts")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_roberts;
    } else if (!strcmp(ctx->filter->name, "sobel")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_sobel;
    }

    return 0;
}
  807. static int filter_frame(AVFilterLink *inlink, AVFrame *in)
  808. {
  809. AVFilterContext *ctx = inlink->dst;
  810. ConvolutionContext *s = ctx->priv;
  811. AVFilterLink *outlink = ctx->outputs[0];
  812. AVFrame *out;
  813. int plane;
  814. out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
  815. if (!out) {
  816. av_frame_free(&in);
  817. return AVERROR(ENOMEM);
  818. }
  819. av_frame_copy_props(out, in);
  820. for (plane = 0; plane < s->nb_planes; plane++) {
  821. ThreadData td;
  822. if (s->copy[plane]) {
  823. av_image_copy_plane(out->data[plane], out->linesize[plane],
  824. in->data[plane], in->linesize[plane],
  825. s->planewidth[plane] * s->bpc,
  826. s->planeheight[plane]);
  827. continue;
  828. }
  829. td.in = in;
  830. td.out = out;
  831. td.plane = plane;
  832. ctx->internal->execute(ctx, s->filter[plane], &td, NULL, FFMIN(s->planeheight[plane], s->nb_threads));
  833. }
  834. av_frame_free(&in);
  835. return ff_filter_frame(outlink, out);
  836. }
  837. static av_cold int init(AVFilterContext *ctx)
  838. {
  839. ConvolutionContext *s = ctx->priv;
  840. int i;
  841. if (!strcmp(ctx->filter->name, "convolution")) {
  842. for (i = 0; i < 4; i++) {
  843. int *matrix = (int *)s->matrix[i];
  844. char *p, *arg, *saveptr = NULL;
  845. p = s->matrix_str[i];
  846. while (s->matrix_length[i] < 49) {
  847. if (!(arg = av_strtok(p, " ", &saveptr)))
  848. break;
  849. p = NULL;
  850. sscanf(arg, "%d", &matrix[s->matrix_length[i]]);
  851. s->matrix_length[i]++;
  852. }
  853. if (s->matrix_length[i] == 9) {
  854. s->size[i] = 3;
  855. if (!memcmp(matrix, same3x3, sizeof(same3x3)))
  856. s->copy[i] = 1;
  857. else
  858. s->filter[i] = filter_3x3;
  859. } else if (s->matrix_length[i] == 25) {
  860. s->size[i] = 5;
  861. if (!memcmp(matrix, same5x5, sizeof(same5x5)))
  862. s->copy[i] = 1;
  863. else
  864. s->filter[i] = filter_5x5;
  865. } else if (s->matrix_length[i] == 49) {
  866. s->size[i] = 7;
  867. if (!memcmp(matrix, same7x7, sizeof(same7x7)))
  868. s->copy[i] = 1;
  869. else
  870. s->filter[i] = filter_7x7;
  871. } else {
  872. return AVERROR(EINVAL);
  873. }
  874. if (s->copy[i] && (s->rdiv[i] != 1. || s->bias[i] != 0.))
  875. s->copy[i] = 0;
  876. }
  877. } else if (!strcmp(ctx->filter->name, "prewitt")) {
  878. for (i = 0; i < 4; i++) {
  879. if ((1 << i) & s->planes)
  880. s->filter[i] = filter_prewitt;
  881. else
  882. s->copy[i] = 1;
  883. }
  884. } else if (!strcmp(ctx->filter->name, "roberts")) {
  885. for (i = 0; i < 4; i++) {
  886. if ((1 << i) & s->planes)
  887. s->filter[i] = filter_roberts;
  888. else
  889. s->copy[i] = 1;
  890. }
  891. } else if (!strcmp(ctx->filter->name, "sobel")) {
  892. for (i = 0; i < 4; i++) {
  893. if ((1 << i) & s->planes)
  894. s->filter[i] = filter_sobel;
  895. else
  896. s->copy[i] = 1;
  897. }
  898. }
  899. return 0;
  900. }
  901. static av_cold void uninit(AVFilterContext *ctx)
  902. {
  903. ConvolutionContext *s = ctx->priv;
  904. av_freep(&s->bptrs);
  905. av_freep(&s->buffer);
  906. }
/* Single video input pad shared by all four filters defined in this file. */
static const AVFilterPad convolution_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
/* Single video output pad shared by all four filters defined in this file. */
static const AVFilterPad convolution_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
#if CONFIG_CONVOLUTION_FILTER
/* Generic convolution filter: applies user-supplied 3x3/5x5/7x7 kernels
 * (per-plane matrix, rdiv and bias options parsed in init()). */
AVFilter ff_vf_convolution = {
.name = "convolution",
.description = NULL_IF_CONFIG_SMALL("Apply convolution filter."),
.priv_size = sizeof(ConvolutionContext),
.priv_class = &convolution_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = convolution_inputs,
.outputs = convolution_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
#endif /* CONFIG_CONVOLUTION_FILTER */
#if CONFIG_PREWITT_FILTER
/* Options for the fixed-kernel prewitt operator: plane selection bitmask
 * plus output scale/delta applied by the filter callbacks. */
static const AVOption prewitt_options[] = {
{ "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
{ "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
{ "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
{ NULL }
};
AVFILTER_DEFINE_CLASS(prewitt);
AVFilter ff_vf_prewitt = {
.name = "prewitt",
.description = NULL_IF_CONFIG_SMALL("Apply prewitt operator."),
.priv_size = sizeof(ConvolutionContext),
.priv_class = &prewitt_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = convolution_inputs,
.outputs = convolution_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
#endif /* CONFIG_PREWITT_FILTER */
#if CONFIG_SOBEL_FILTER
/* Options for the fixed-kernel sobel operator: plane selection bitmask
 * plus output scale/delta applied by the filter callbacks. */
static const AVOption sobel_options[] = {
{ "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
{ "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
{ "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
{ NULL }
};
AVFILTER_DEFINE_CLASS(sobel);
AVFilter ff_vf_sobel = {
.name = "sobel",
.description = NULL_IF_CONFIG_SMALL("Apply sobel operator."),
.priv_size = sizeof(ConvolutionContext),
.priv_class = &sobel_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = convolution_inputs,
.outputs = convolution_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
#endif /* CONFIG_SOBEL_FILTER */
#if CONFIG_ROBERTS_FILTER
/* Options for the fixed-kernel roberts cross operator: plane selection
 * bitmask plus output scale/delta applied by the filter callbacks. */
static const AVOption roberts_options[] = {
{ "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15}, 0, 15, FLAGS},
{ "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, 65535, FLAGS},
{ "delta", "set delta", OFFSET(delta), AV_OPT_TYPE_FLOAT, {.dbl=0}, -65535, 65535, FLAGS},
{ NULL }
};
AVFILTER_DEFINE_CLASS(roberts);
AVFilter ff_vf_roberts = {
.name = "roberts",
.description = NULL_IF_CONFIG_SMALL("Apply roberts cross operator."),
.priv_size = sizeof(ConvolutionContext),
.priv_class = &roberts_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = convolution_inputs,
.outputs = convolution_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
#endif /* CONFIG_ROBERTS_FILTER */