/* FFmpeg libavfilter "monochrome" filter source. */
  1. /*
  2. * Copyright (c) 2021 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <float.h>
  21. #include "libavutil/opt.h"
  22. #include "libavutil/imgutils.h"
  23. #include "avfilter.h"
  24. #include "formats.h"
  25. #include "internal.h"
  26. #include "video.h"
typedef struct MonochromeContext {
    const AVClass *class;

    float b, r;   // chroma blue/red spot, options "cb"/"cr", range [-1, 1]
    float size;   // color filter size, option "size", range [0.1, 10]
    float high;   // highlights strength, option "high", range [0, 1]

    int depth;       // per-component bit depth of the input pixel format
    int subw, subh;  // log2 horizontal/vertical chroma subsampling

    // Slice workers selected in config_input(): 8-bit vs 16-bit variants.
    int (*do_slice)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
    int (*clear_uv)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
} MonochromeContext;
  39. static float envelope(const float x)
  40. {
  41. const float beta = 0.6f;
  42. if (x < beta) {
  43. const float tmp = fabsf(x / beta - 1.f);
  44. return 1.f - tmp * tmp;
  45. } else {
  46. const float tmp = (1.f - x) / (1.f - beta);
  47. return tmp * tmp * (3.f - 2.f * tmp);
  48. }
  49. }
  50. static float filter(float b, float r, float u, float v, float size)
  51. {
  52. return expf(-av_clipf(((b - u) * (b - u) +
  53. (r - v) * (r - v)) *
  54. size, 0.f, 1.f));
  55. }
/* Per-pixel kernel shared by the 8- and 16-bit slice workers. Relies on
 * these names being in scope at the expansion site: x, subw, yptr, uptr,
 * vptr, imax, b, r, size, ihigh. It normalizes Y to [0,1] and U/V to
 * [-0.5,0.5], weights the pixel by its chroma distance from the (b, r)
 * spot via filter(), and blends that weight into the luma; envelope()
 * with ihigh limits the effect on highlights. The result is left in "ny"
 * for the caller to clip and store.
 * NOTE(review): the macro's "float y" shadows the callers' row index "y";
 * harmless because the row index is not used inside the inner x loop. */
#define PROCESS() \
    const int cx = x >> subw; \
    float y = yptr[x] * imax; \
    float u = uptr[cx] * imax - .5f; \
    float v = vptr[cx] * imax - .5f; \
    float tt, t, ny; \
\
    ny = filter(b, r, u, v, size); \
    tt = envelope(y); \
    t = tt + (1.f - tt) * ihigh; \
    ny = (1.f - t) * y + t * ny * y;
/* Slice-threaded worker for 8-bit formats: rewrites the luma plane of the
 * frame (arg) in place for rows [slice_start, slice_end). The chroma planes
 * are only read here; they are cleared separately by clear_slice8(). */
static int monochrome_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MonochromeContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const int subw = s->subw;
    const int subh = s->subh;
    const float max = (1 << depth) - 1;
    const float imax = 1.f / max;
    const int width = frame->width;
    const int height = frame->height;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const int ylinesize = frame->linesize[0];
    const int ulinesize = frame->linesize[1];
    const int vlinesize = frame->linesize[2];
    uint8_t *yptr = frame->data[0] + slice_start * ylinesize;
    const float ihigh = 1.f - s->high;
    const float size = 1.f / s->size;  // larger "size" option => wider pass band in filter()
    const float b = s->b * .5f;        // map option range [-1,1] to U/V range [-0.5,0.5]
    const float r = s->r * .5f;

    for (int y = slice_start; y < slice_end; y++) {
        const int cy = y >> subh;  // chroma row matching this luma row
        uint8_t *uptr = frame->data[1] + cy * ulinesize;
        uint8_t *vptr = frame->data[2] + cy * vlinesize;

        for (int x = 0; x < width; x++) {
            PROCESS()
            yptr[x] = av_clip_uint8(ny * max);
        }

        yptr += ylinesize;
    }

    return 0;
}
/* Slice-threaded worker for >8-bit formats: same algorithm as
 * monochrome_slice8() but with 16-bit samples; linesize is converted from
 * bytes to uint16_t elements, and the result is clipped to "depth" bits. */
static int monochrome_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MonochromeContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const int subw = s->subw;
    const int subh = s->subh;
    const float max = (1 << depth) - 1;
    const float imax = 1.f / max;
    const int width = frame->width;
    const int height = frame->height;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr + 1)) / nb_jobs;
    const int ylinesize = frame->linesize[0] / 2;  // stride in elements, not bytes
    const int ulinesize = frame->linesize[1] / 2;
    const int vlinesize = frame->linesize[2] / 2;
    uint16_t *yptr = (uint16_t *)frame->data[0] + slice_start * ylinesize;
    const float ihigh = 1.f - s->high;
    const float size = 1.f / s->size;  // larger "size" option => wider pass band in filter()
    const float b = s->b * .5f;        // map option range [-1,1] to U/V range [-0.5,0.5]
    const float r = s->r * .5f;

    for (int y = slice_start; y < slice_end; y++) {
        const int cy = y >> subh;  // chroma row matching this luma row
        uint16_t *uptr = (uint16_t *)frame->data[1] + cy * ulinesize;
        uint16_t *vptr = (uint16_t *)frame->data[2] + cy * vlinesize;

        for (int x = 0; x < width; x++) {
            PROCESS()
            yptr[x] = av_clip_uintp2_c(ny * max, depth);
        }

        yptr += ylinesize;
    }

    return 0;
}
  133. static int clear_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  134. {
  135. MonochromeContext *s = ctx->priv;
  136. AVFrame *frame = arg;
  137. const int depth = s->depth;
  138. const int half = 1 << (depth - 1);
  139. const int subw = s->subw;
  140. const int subh = s->subh;
  141. const int width = AV_CEIL_RSHIFT(frame->width, subw);
  142. const int height = AV_CEIL_RSHIFT(frame->height, subh);
  143. const int slice_start = (height * jobnr) / nb_jobs;
  144. const int slice_end = (height * (jobnr + 1)) / nb_jobs;
  145. const int ulinesize = frame->linesize[1];
  146. const int vlinesize = frame->linesize[2];
  147. for (int y = slice_start; y < slice_end; y++) {
  148. uint8_t *uptr = frame->data[1] + y * ulinesize;
  149. uint8_t *vptr = frame->data[2] + y * vlinesize;
  150. memset(uptr, half, width);
  151. memset(vptr, half, width);
  152. }
  153. return 0;
  154. }
  155. static int clear_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  156. {
  157. MonochromeContext *s = ctx->priv;
  158. AVFrame *frame = arg;
  159. const int depth = s->depth;
  160. const int half = 1 << (depth - 1);
  161. const int subw = s->subw;
  162. const int subh = s->subh;
  163. const int width = AV_CEIL_RSHIFT(frame->width, subw);
  164. const int height = AV_CEIL_RSHIFT(frame->height, subh);
  165. const int slice_start = (height * jobnr) / nb_jobs;
  166. const int slice_end = (height * (jobnr + 1)) / nb_jobs;
  167. const int ulinesize = frame->linesize[1] / 2;
  168. const int vlinesize = frame->linesize[2] / 2;
  169. for (int y = slice_start; y < slice_end; y++) {
  170. uint16_t *uptr = (uint16_t *)frame->data[1] + y * ulinesize;
  171. uint16_t *vptr = (uint16_t *)frame->data[2] + y * vlinesize;
  172. for (int x = 0; x < width; x++) {
  173. uptr[x] = half;
  174. vptr[x] = half;
  175. }
  176. }
  177. return 0;
  178. }
  179. static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
  180. {
  181. AVFilterContext *ctx = inlink->dst;
  182. MonochromeContext *s = ctx->priv;
  183. ctx->internal->execute(ctx, s->do_slice, frame, NULL,
  184. FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
  185. ctx->internal->execute(ctx, s->clear_uv, frame, NULL,
  186. FFMIN(frame->height >> s->subh, ff_filter_get_nb_threads(ctx)));
  187. return ff_filter_frame(ctx->outputs[0], frame);
  188. }
  189. static av_cold int query_formats(AVFilterContext *ctx)
  190. {
  191. static const enum AVPixelFormat pixel_fmts[] = {
  192. AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
  193. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
  194. AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
  195. AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
  196. AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
  197. AV_PIX_FMT_YUVJ411P,
  198. AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
  199. AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
  200. AV_PIX_FMT_YUV440P10,
  201. AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
  202. AV_PIX_FMT_YUV440P12,
  203. AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
  204. AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
  205. AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
  206. AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
  207. AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
  208. AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
  209. AV_PIX_FMT_NONE
  210. };
  211. AVFilterFormats *formats = NULL;
  212. formats = ff_make_format_list(pixel_fmts);
  213. if (!formats)
  214. return AVERROR(ENOMEM);
  215. return ff_set_common_formats(ctx, formats);
  216. }
  217. static av_cold int config_input(AVFilterLink *inlink)
  218. {
  219. AVFilterContext *ctx = inlink->dst;
  220. MonochromeContext *s = ctx->priv;
  221. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  222. s->depth = desc->comp[0].depth;
  223. s->do_slice = s->depth <= 8 ? monochrome_slice8 : monochrome_slice16;
  224. s->clear_uv = s->depth <= 8 ? clear_slice8 : clear_slice16;
  225. s->subw = desc->log2_chroma_w;
  226. s->subh = desc->log2_chroma_h;
  227. return 0;
  228. }
// Single video input; the filter modifies the frame in place, hence
// needs_writable.
static const AVFilterPad monochrome_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .needs_writable = 1,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
// Single video output, same properties as the input.
static const AVFilterPad monochrome_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
#define OFFSET(x) offsetof(MonochromeContext, x)
// All options are video filtering parameters and may be changed at runtime
// (the filter sets .process_command).
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption monochrome_options[] = {
    { "cb", "set the chroma blue spot", OFFSET(b), AV_OPT_TYPE_FLOAT, {.dbl=0},-1, 1, VF },
    { "cr", "set the chroma red spot", OFFSET(r), AV_OPT_TYPE_FLOAT, {.dbl=0},-1, 1, VF },
    { "size", "set the color filter size", OFFSET(size), AV_OPT_TYPE_FLOAT, {.dbl=1},.1,10, VF },
    { "high", "set the highlights strength", OFFSET(high), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, 1, VF },
    { NULL }
};
AVFILTER_DEFINE_CLASS(monochrome);

// Filter registration: slice-threaded, timeline-capable, in-place filter
// that converts video to gray through a tunable chroma color filter.
AVFilter ff_vf_monochrome = {
    .name = "monochrome",
    .description = NULL_IF_CONFIG_SMALL("Convert video to gray using custom color filter."),
    .priv_size = sizeof(MonochromeContext),
    .priv_class = &monochrome_class,
    .query_formats = query_formats,
    .inputs = monochrome_inputs,
    .outputs = monochrome_outputs,
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};