You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

219 lines
7.0KB

  1. /*
  2. * Copyright (c) 2017 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/opt.h"
  21. #include "libavutil/imgutils.h"
  22. #include "avfilter.h"
  23. #include "formats.h"
  24. #include "internal.h"
  25. #include "video.h"
/**
 * Per-instance state for the lumakey filter.
 *
 * The double-valued options are user-facing (0..1 range); config_input()
 * converts them into the integer thresholds used by the slice workers.
 */
typedef struct LumakeyContext {
    const AVClass *class;

    double threshold;   ///< center of the keyed luma range, 0..1 (option)
    double tolerance;   ///< half-width of the keyed range, 0..1 (option)
    double softness;    ///< width of the transparency ramp, 0..1 (option)

    int white;          ///< upper keyed luma bound, in pixel units (set in config_input)
    int black;          ///< lower keyed luma bound, in pixel units (set in config_input)
    int so;             ///< softness in pixel units; ramp length on each side
    int max;            ///< max pixel value, (1 << depth) - 1; only set for depth > 8

    /// Depth-specific slice worker (do_lumakey_slice8 or do_lumakey_slice16)
    int (*do_lumakey_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} LumakeyContext;
  37. static int do_lumakey_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  38. {
  39. LumakeyContext *s = ctx->priv;
  40. AVFrame *frame = arg;
  41. const int slice_start = (frame->height * jobnr) / nb_jobs;
  42. const int slice_end = (frame->height * (jobnr + 1)) / nb_jobs;
  43. uint8_t *alpha = frame->data[3] + slice_start * frame->linesize[3];
  44. const uint8_t *luma = frame->data[0] + slice_start * frame->linesize[0];
  45. const int so = s->so;
  46. const int w = s->white;
  47. const int b = s->black;
  48. int x, y;
  49. for (y = slice_start; y < slice_end; y++) {
  50. for (x = 0; x < frame->width; x++) {
  51. if (luma[x] >= b && luma[x] <= w) {
  52. alpha[x] = 0;
  53. } else if (luma[x] > b - so && luma[x] < w + so) {
  54. if (luma[x] < b) {
  55. alpha[x] = 255 - (luma[x] - b + so) * 255 / so;
  56. } else {
  57. alpha[x] = (luma[x] - w) * 255 / so;
  58. }
  59. }
  60. }
  61. luma += frame->linesize[0];
  62. alpha += frame->linesize[3];
  63. }
  64. return 0;
  65. }
  66. static int do_lumakey_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  67. {
  68. LumakeyContext *s = ctx->priv;
  69. AVFrame *frame = arg;
  70. const int slice_start = (frame->height * jobnr) / nb_jobs;
  71. const int slice_end = (frame->height * (jobnr + 1)) / nb_jobs;
  72. uint16_t *alpha = (uint16_t *)(frame->data[3] + slice_start * frame->linesize[3]);
  73. const uint16_t *luma = (const uint16_t *)(frame->data[0] + slice_start * frame->linesize[0]);
  74. const int so = s->so;
  75. const int w = s->white;
  76. const int b = s->black;
  77. const int m = s->max;
  78. int x, y;
  79. for (y = slice_start; y < slice_end; y++) {
  80. for (x = 0; x < frame->width; x++) {
  81. if (luma[x] >= b && luma[x] <= w) {
  82. alpha[x] = 0;
  83. } else if (luma[x] > b - so && luma[x] < w + so) {
  84. if (luma[x] < b) {
  85. alpha[x] = m - (luma[x] - b + so) * m / so;
  86. } else {
  87. alpha[x] = (luma[x] - w) * m / so;
  88. }
  89. }
  90. }
  91. luma += frame->linesize[0] / 2;
  92. alpha += frame->linesize[3] / 2;
  93. }
  94. return 0;
  95. }
  96. static int config_input(AVFilterLink *inlink)
  97. {
  98. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
  99. AVFilterContext *ctx = inlink->dst;
  100. LumakeyContext *s = ctx->priv;
  101. int depth;
  102. depth = desc->comp[0].depth;
  103. if (depth == 8) {
  104. s->white = av_clip_uint8((s->threshold + s->tolerance) * 255);
  105. s->black = av_clip_uint8((s->threshold - s->tolerance) * 255);
  106. s->do_lumakey_slice = do_lumakey_slice8;
  107. s->so = s->softness * 255;
  108. } else {
  109. s->max = (1 << depth) - 1;
  110. s->white = av_clip((s->threshold + s->tolerance) * s->max, 0, s->max);
  111. s->black = av_clip((s->threshold - s->tolerance) * s->max, 0, s->max);
  112. s->do_lumakey_slice = do_lumakey_slice16;
  113. s->so = s->softness * s->max;
  114. }
  115. return 0;
  116. }
  117. static int filter_frame(AVFilterLink *link, AVFrame *frame)
  118. {
  119. AVFilterContext *ctx = link->dst;
  120. LumakeyContext *s = ctx->priv;
  121. int ret;
  122. if (ret = av_frame_make_writable(frame))
  123. return ret;
  124. if (ret = ctx->internal->execute(ctx, s->do_lumakey_slice, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(ctx))))
  125. return ret;
  126. return ff_filter_frame(ctx->outputs[0], frame);
  127. }
  128. static av_cold int query_formats(AVFilterContext *ctx)
  129. {
  130. static const enum AVPixelFormat pixel_fmts[] = {
  131. AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
  132. AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
  133. AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
  134. AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA422P12,
  135. AV_PIX_FMT_YUVA444P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA420P16,
  136. AV_PIX_FMT_NONE
  137. };
  138. AVFilterFormats *formats;
  139. formats = ff_make_format_list(pixel_fmts);
  140. if (!formats)
  141. return AVERROR(ENOMEM);
  142. return ff_set_common_formats(ctx, formats);
  143. }
  144. static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
  145. char *res, int res_len, int flags)
  146. {
  147. int ret;
  148. ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
  149. if (ret < 0)
  150. return ret;
  151. return config_input(ctx->inputs[0]);
  152. }
/* Single video input: per-frame processing plus threshold setup on
 * (re)configuration. */
static const AVFilterPad lumakey_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single video output; frames are pushed from filter_frame(). */
static const AVFilterPad lumakey_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
#define OFFSET(x) offsetof(LumakeyContext, x)
/* All options are video filtering parameters and may be changed at runtime
 * via process_command(). */
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* User options, all normalized to 0..1; converted to pixel units in
 * config_input(). */
static const AVOption lumakey_options[] = {
    { "threshold", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0},    0, 1, FLAGS },
    { "tolerance", "set the tolerance value", OFFSET(tolerance), AV_OPT_TYPE_DOUBLE, {.dbl=0.01}, 0, 1, FLAGS },
    { "softness",  "set the softness value",  OFFSET(softness),  AV_OPT_TYPE_DOUBLE, {.dbl=0},    0, 1, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(lumakey);

/* Filter registration: slice-threaded (independent rows) and usable with
 * generic timeline support ("enable" expression). */
AVFilter ff_vf_lumakey = {
    .name            = "lumakey",
    .description     = NULL_IF_CONFIG_SMALL("Turns a certain luma into transparency."),
    .priv_size       = sizeof(LumakeyContext),
    .priv_class      = &lumakey_class,
    .query_formats   = query_formats,
    .inputs          = lumakey_inputs,
    .outputs         = lumakey_outputs,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};