/*
 * Copyright (c) 2012-2013 Oka Motofumi (chikuzen.mo at gmail dot com)
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <limits.h>  /* INT_MAX */
#include <stdio.h>   /* sscanf() */
#include <string.h>  /* memcpy(), memcmp() */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

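/* Filter state: one matrix, divisor and bias per plane, the per-plane
 * geometry derived from the input format, and a scratch buffer of padded
 * lines shared by all filter functions. */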
typedef struct ConvolutionContext {
    const AVClass *class;

    char *matrix_str[4];
    float rdiv[4];
    float bias[4];

    int size[4];
    int depth;
    int bstride;
    uint8_t *buffer;
    int nb_planes;
    int planewidth[4];
    int planeheight[4];
    int matrix[4][25];
    int matrix_length[4];
    int copy[4];

    void (*filter[4])(struct ConvolutionContext *s, AVFrame *in, AVFrame *out, int plane);
} ConvolutionContext;

#define OFFSET(x) offsetof(ConvolutionContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

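/* Options use a "<plane><name>" scheme: each of the four planes takes a
 * matrix string ("0m".."3m"), a factor the weighted sum is multiplied by
 * ("0rdiv".."3rdiv") and an offset added after scaling ("0bias".."3bias").
 * The default matrix is the 3x3 identity kernel, which leaves a plane
 * untouched. */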
static const AVOption convolution_options[] = {
    { "0m", "set matrix for 1st plane", OFFSET(matrix_str[0]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "1m", "set matrix for 2nd plane", OFFSET(matrix_str[1]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "2m", "set matrix for 3rd plane", OFFSET(matrix_str[2]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "3m", "set matrix for 4th plane", OFFSET(matrix_str[3]), AV_OPT_TYPE_STRING, {.str="0 0 0 0 1 0 0 0 0"}, 0, 0, FLAGS },
    { "0rdiv", "set rdiv for 1st plane", OFFSET(rdiv[0]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "1rdiv", "set rdiv for 2nd plane", OFFSET(rdiv[1]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "2rdiv", "set rdiv for 3rd plane", OFFSET(rdiv[2]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "3rdiv", "set rdiv for 4th plane", OFFSET(rdiv[3]), AV_OPT_TYPE_FLOAT, {.dbl=1.0}, 0.0, INT_MAX, FLAGS },
    { "0bias", "set bias for 1st plane", OFFSET(bias[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { "1bias", "set bias for 2nd plane", OFFSET(bias[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { "2bias", "set bias for 3rd plane", OFFSET(bias[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { "3bias", "set bias for 4th plane", OFFSET(bias[3]), AV_OPT_TYPE_FLOAT, {.dbl=0.0}, 0.0, INT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(convolution);

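/* Identity kernels: a plane whose matrix matches one of these needs no
 * convolution and is flagged for a plain copy in init(). */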
static const int same3x3[9] = {0, 0, 0,
                               0, 1, 0,
                               0, 0, 0};

static const int same5x5[25] = {0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0,
                                0, 0, 1, 0, 0,
                                0, 0, 0, 0, 0,
                                0, 0, 0, 0, 0};

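/* Only planar YUV, GBR(A) and grayscale formats are accepted, so every
 * plane can be filtered independently with its own matrix. */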
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

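/* Copy one line of pixels into the scratch buffer and mirror "margin"
 * samples past each end, so the horizontal taps can read beyond the left
 * and right borders without bounds checks. */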
static inline void line_copy8(uint8_t *line, const uint8_t *srcp, int width, int margin)
{
    int i;

    memcpy(line, srcp, width);

    for (i = margin; i > 0; i--) {
        line[-i] = line[i];
        line[width - 1 + i] = line[width - 1 - i];
    }
}

static inline void line_copy16(uint16_t *line, const uint16_t *srcp, int width, int margin)
{
    int i;

    memcpy(line, srcp, width * 2);

    for (i = margin; i > 0; i--) {
        line[-i] = line[i];
        line[width - 1 + i] = line[width - 1 - i];
    }
}

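/* 3x3 convolution for formats deeper than 8 bits. Three padded lines are
 * cycled through a small ring buffer; each output sample is the weighted
 * sum of its 3x3 neighbourhood, multiplied by rdiv, offset by bias and
 * clipped to the format's peak value. The top and bottom borders are
 * mirrored by re-feeding the adjacent line. */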
static void filter16_3x3(ConvolutionContext *s, AVFrame *in, AVFrame *out, int plane)
{
    const uint16_t *src = (const uint16_t *)in->data[plane];
    uint16_t *dst = (uint16_t *)out->data[plane];
    const int peak = (1 << s->depth) - 1;
    const int stride = in->linesize[plane] / 2;
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    uint16_t *p0 = (uint16_t *)s->buffer + 16;
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *orig = p0, *end = p2;
    const int *matrix = s->matrix[plane];
    const float rdiv = s->rdiv[plane];
    const float bias = s->bias[plane];
    int y, x;

    line_copy16(p0, src + stride, width, 1);
    line_copy16(p1, src, width, 1);

    for (y = 0; y < height; y++) {
        src += stride * (y < height - 1 ? 1 : -1);
        line_copy16(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            int sum = p0[x - 1] * matrix[0] +
                      p0[x]     * matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x]     * matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x]     * matrix[7] +
                      p2[x + 1] * matrix[8];

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip(sum, 0, peak);
        }

        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig : p2 + bstride;
        dst += out->linesize[plane] / 2;
    }
}

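/* 5x5 variant of the deep-sample path: five padded lines sit in the ring
 * buffer and the 25 taps are gathered through a pointer table; two lines
 * are mirrored at the top and bottom borders. */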
static void filter16_5x5(ConvolutionContext *s, AVFrame *in, AVFrame *out, int plane)
{
    const uint16_t *src = (const uint16_t *)in->data[plane];
    uint16_t *dst = (uint16_t *)out->data[plane];
    const int peak = (1 << s->depth) - 1;
    const int stride = in->linesize[plane] / 2;
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    uint16_t *p0 = (uint16_t *)s->buffer + 16;
    uint16_t *p1 = p0 + bstride;
    uint16_t *p2 = p1 + bstride;
    uint16_t *p3 = p2 + bstride;
    uint16_t *p4 = p3 + bstride;
    uint16_t *orig = p0, *end = p4;
    const int *matrix = s->matrix[plane];
    float rdiv = s->rdiv[plane];
    float bias = s->bias[plane];
    int y, x, i;

    line_copy16(p0, src + 2 * stride, width, 2);
    line_copy16(p1, src + stride, width, 2);
    line_copy16(p2, src, width, 2);
    src += stride;
    line_copy16(p3, src, width, 2);

    for (y = 0; y < height; y++) {
        uint16_t *array[] = {
            p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
            p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
            p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
            p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
            p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
        };

        src += stride * (y < height - 2 ? 1 : -1);
        line_copy16(p4, src, width, 2);

        for (x = 0; x < width; x++) {
            int sum = 0;

            for (i = 0; i < 25; i++) {
                sum += *(array[i] + x) * matrix[i];
            }

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip(sum, 0, peak);
        }

        p0 = p1;
        p1 = p2;
        p2 = p3;
        p3 = p4;
        p4 = (p4 == end) ? orig : p4 + bstride;
        dst += out->linesize[plane] / 2;
    }
}

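/* 8-bit 3x3 path; identical to filter16_3x3 apart from the sample type and
 * the fixed 0..255 clip. */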
static void filter_3x3(ConvolutionContext *s, AVFrame *in, AVFrame *out, int plane)
{
    const uint8_t *src = in->data[plane];
    uint8_t *dst = out->data[plane];
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    uint8_t *p0 = s->buffer + 16;
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *orig = p0, *end = p2;
    const int *matrix = s->matrix[plane];
    const float rdiv = s->rdiv[plane];
    const float bias = s->bias[plane];
    int y, x;

    line_copy8(p0, src + stride, width, 1);
    line_copy8(p1, src, width, 1);

    for (y = 0; y < height; y++) {
        src += stride * (y < height - 1 ? 1 : -1);
        line_copy8(p2, src, width, 1);

        for (x = 0; x < width; x++) {
            int sum = p0[x - 1] * matrix[0] +
                      p0[x]     * matrix[1] +
                      p0[x + 1] * matrix[2] +
                      p1[x - 1] * matrix[3] +
                      p1[x]     * matrix[4] +
                      p1[x + 1] * matrix[5] +
                      p2[x - 1] * matrix[6] +
                      p2[x]     * matrix[7] +
                      p2[x + 1] * matrix[8];

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip_uint8(sum);
        }

        p0 = p1;
        p1 = p2;
        p2 = (p2 == end) ? orig : p2 + bstride;
        dst += out->linesize[plane];
    }
}

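/* 8-bit 5x5 path, the byte-sized counterpart of filter16_5x5. */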
static void filter_5x5(ConvolutionContext *s, AVFrame *in, AVFrame *out, int plane)
{
    const uint8_t *src = in->data[plane];
    uint8_t *dst = out->data[plane];
    const int stride = in->linesize[plane];
    const int bstride = s->bstride;
    const int height = s->planeheight[plane];
    const int width = s->planewidth[plane];
    uint8_t *p0 = s->buffer + 16;
    uint8_t *p1 = p0 + bstride;
    uint8_t *p2 = p1 + bstride;
    uint8_t *p3 = p2 + bstride;
    uint8_t *p4 = p3 + bstride;
    uint8_t *orig = p0, *end = p4;
    const int *matrix = s->matrix[plane];
    float rdiv = s->rdiv[plane];
    float bias = s->bias[plane];
    int y, x, i;

    line_copy8(p0, src + 2 * stride, width, 2);
    line_copy8(p1, src + stride, width, 2);
    line_copy8(p2, src, width, 2);
    src += stride;
    line_copy8(p3, src, width, 2);

    for (y = 0; y < height; y++) {
        uint8_t *array[] = {
            p0 - 2, p0 - 1, p0, p0 + 1, p0 + 2,
            p1 - 2, p1 - 1, p1, p1 + 1, p1 + 2,
            p2 - 2, p2 - 1, p2, p2 + 1, p2 + 2,
            p3 - 2, p3 - 1, p3, p3 + 1, p3 + 2,
            p4 - 2, p4 - 1, p4, p4 + 1, p4 + 2
        };

        src += stride * (y < height - 2 ? 1 : -1);
        line_copy8(p4, src, width, 2);

        for (x = 0; x < width; x++) {
            int sum = 0;

            for (i = 0; i < 25; i++) {
                sum += *(array[i] + x) * matrix[i];
            }

            sum = (int)(sum * rdiv + bias + 0.5f);
            dst[x] = av_clip_uint8(sum);
        }

        p0 = p1;
        p1 = p2;
        p2 = p3;
        p3 = p4;
        p4 = (p4 == end) ? orig : p4 + bstride;
        dst += out->linesize[plane];
    }
}

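/* Configure per-plane dimensions (in samples) once the input format is
 * known, allocate the scratch buffer and, for formats deeper than 8 bits,
 * swap in the 16-bit filter functions. The buffer holds five lines of
 * bstride samples; the extra 32 samples per line (16 on each side, matching
 * the "+ 16" offset in the filter functions) leave room for the mirrored
 * margins. */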
static int config_input(AVFilterLink *inlink)
{
    ConvolutionContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p;

    s->depth = desc->comp[0].depth;

    /* Plane widths must be sample counts, not byte linesizes, or the 16-bit
     * filter paths would run past the end of each row. */
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->bstride = s->planewidth[0] + 32;
    s->buffer = av_malloc_array(5 * s->bstride, (s->depth + 7) / 8);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (s->depth > 8) {
        for (p = 0; p < s->nb_planes; p++) {
            if (s->size[p] == 3)
                s->filter[p] = filter16_3x3;
            else if (s->size[p] == 5)
                s->filter[p] = filter16_5x5;
        }
    }

    return 0;
}

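/* Filter one frame: planes whose matrix is an identity kernel are copied
 * verbatim, every other plane goes through its selected filter function. */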
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    ConvolutionContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int plane;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < s->nb_planes; plane++) {
        if (s->copy[plane]) {
            /* av_image_copy_plane() takes a width in bytes. */
            av_image_copy_plane(out->data[plane], out->linesize[plane],
                                in->data[plane], in->linesize[plane],
                                s->planewidth[plane] * ((s->depth + 7) / 8),
                                s->planeheight[plane]);
            continue;
        }

        s->filter[plane](s, in, out, plane);
    }

    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

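/* Parse the per-plane matrix strings: 9 integers select the 3x3 path,
 * 25 integers the 5x5 path, any other count is rejected. Identity kernels
 * are detected here and marked for plain copying instead. */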
static av_cold int init(AVFilterContext *ctx)
{
    ConvolutionContext *s = ctx->priv;
    int i;

    for (i = 0; i < 4; i++) {
        int *matrix = (int *)s->matrix[i];
        char *p, *arg, *saveptr = NULL;

        p = s->matrix_str[i];
        while (s->matrix_length[i] < 25) {
            if (!(arg = av_strtok(p, " ", &saveptr)))
                break;

            p = NULL;
            sscanf(arg, "%d", &matrix[s->matrix_length[i]]);
            s->matrix_length[i]++;
        }

        if (s->matrix_length[i] == 9) {
            s->size[i] = 3;
            if (!memcmp(matrix, same3x3, sizeof(same3x3)))
                s->copy[i] = 1;
            else
                s->filter[i] = filter_3x3;
        } else if (s->matrix_length[i] == 25) {
            s->size[i] = 5;
            if (!memcmp(matrix, same5x5, sizeof(same5x5)))
                s->copy[i] = 1;
            else
                s->filter[i] = filter_5x5;
        } else {
            return AVERROR(EINVAL);
        }
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ConvolutionContext *s = ctx->priv;

    av_freep(&s->buffer);
}

static const AVFilterPad convolution_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad convolution_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_convolution = {
    .name          = "convolution",
    .description   = NULL_IF_CONFIG_SMALL("Apply convolution filter."),
    .priv_size     = sizeof(ConvolutionContext),
    .priv_class    = &convolution_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = convolution_inputs,
    .outputs       = convolution_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};