You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

342 lines
11KB

  1. /*
  2. * Copyright (c) 2019 Vladimir Panteleev
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <float.h>
  21. #include "libavutil/imgutils.h"
  22. #include "libavutil/opt.h"
  23. #include "libavutil/pixdesc.h"
  24. #include "avfilter.h"
  25. #include "formats.h"
  26. #include "internal.h"
  27. #include "video.h"
  28. #define MAX_FRAMES 240
  29. #define GRID_SIZE 8
  30. #define NUM_CHANNELS 3
/* Downsampled representation of one video frame: the average color of each
 * cell in a GRID_SIZE x GRID_SIZE grid.  Each cell has 4 bytes of storage,
 * but only the first NUM_CHANNELS bytes are written by convert_frame(). */
typedef struct PhotosensitivityFrame {
    uint8_t grid[GRID_SIZE][GRID_SIZE][4];
} PhotosensitivityFrame;
typedef struct PhotosensitivityContext {
    const AVClass *class;

    /* User options (see photosensitivity_options below). */
    int nb_frames;              // length of the badness history window ("frames"/"f")
    int skip;                   // sample every skip-th pixel when downsampling ("skip")
    float threshold_multiplier; // detection threshold factor ("threshold"/"t")
    int bypass;                 // analyze only; leave frames unchanged ("bypass")

    int badness_threshold;      // absolute threshold, derived in config_input()

    /* Circular buffer of per-frame badness values, nb_frames entries used. */
    int history[MAX_FRAMES];
    int history_pos;            // index of the oldest entry / next write position

    PhotosensitivityFrame last_frame_e; // downsampled copy of the last emitted frame
    AVFrame *last_frame_av;             // owned reference to the last emitted frame
} PhotosensitivityContext;
#define OFFSET(x) offsetof(PhotosensitivityContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User-visible options; "f" and "t" are short aliases for "frames" and
 * "threshold" and map to the same context fields. */
static const AVOption photosensitivity_options[] = {
    { "frames", "set how many frames to use", OFFSET(nb_frames), AV_OPT_TYPE_INT, {.i64=30}, 2, MAX_FRAMES, FLAGS },
    { "f", "set how many frames to use", OFFSET(nb_frames), AV_OPT_TYPE_INT, {.i64=30}, 2, MAX_FRAMES, FLAGS },
    { "threshold", "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0.1, FLT_MAX, FLAGS },
    { "t", "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0.1, FLT_MAX, FLAGS },
    { "skip", "set pixels to skip when sampling frames", OFFSET(skip), AV_OPT_TYPE_INT, {.i64=1}, 1, 1024, FLAGS },
    { "bypass", "leave frames unchanged", OFFSET(bypass), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(photosensitivity);
  59. static int query_formats(AVFilterContext *ctx)
  60. {
  61. static const enum AVPixelFormat pixel_fmts[] = {
  62. AV_PIX_FMT_RGB24,
  63. AV_PIX_FMT_BGR24,
  64. AV_PIX_FMT_NONE
  65. };
  66. AVFilterFormats *formats = ff_make_format_list(pixel_fmts);
  67. if (!formats)
  68. return AVERROR(ENOMEM);
  69. return ff_set_common_formats(ctx, formats);
  70. }
/* Per-call context for convert_frame_partial(), shared by all slice jobs. */
typedef struct ThreadData_convert_frame
{
    AVFrame *in;                // source frame (packed 24-bit RGB/BGR)
    PhotosensitivityFrame *out; // destination grid of averaged cell colors
    int skip;                   // sample every skip-th pixel in both directions
} ThreadData_convert_frame;

/* Total number of grid cells; also the upper bound on useful slice jobs. */
#define NUM_CELLS (GRID_SIZE * GRID_SIZE)
  78. static int convert_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  79. {
  80. int cell, gx, gy, x0, x1, y0, y1, x, y, c, area;
  81. int sum[NUM_CHANNELS];
  82. const uint8_t *p;
  83. ThreadData_convert_frame *td = arg;
  84. const int slice_start = (NUM_CELLS * jobnr) / nb_jobs;
  85. const int slice_end = (NUM_CELLS * (jobnr+1)) / nb_jobs;
  86. int width = td->in->width, height = td->in->height, linesize = td->in->linesize[0], skip = td->skip;
  87. const uint8_t *data = td->in->data[0];
  88. for (cell = slice_start; cell < slice_end; cell++) {
  89. gx = cell % GRID_SIZE;
  90. gy = cell / GRID_SIZE;
  91. x0 = width * gx / GRID_SIZE;
  92. x1 = width * (gx+1) / GRID_SIZE;
  93. y0 = height * gy / GRID_SIZE;
  94. y1 = height * (gy+1) / GRID_SIZE;
  95. for (c = 0; c < NUM_CHANNELS; c++) {
  96. sum[c] = 0;
  97. }
  98. for (y = y0; y < y1; y += skip) {
  99. p = data + y * linesize + x0 * NUM_CHANNELS;
  100. for (x = x0; x < x1; x += skip) {
  101. //av_log(NULL, AV_LOG_VERBOSE, "%d %d %d : (%d,%d) (%d,%d) -> %d,%d | *%d\n", c, gx, gy, x0, y0, x1, y1, x, y, (int)row);
  102. sum[0] += p[0];
  103. sum[1] += p[1];
  104. sum[2] += p[2];
  105. p += NUM_CHANNELS * skip;
  106. // TODO: variable size
  107. }
  108. }
  109. area = ((x1 - x0 + skip - 1) / skip) * ((y1 - y0 + skip - 1) / skip);
  110. for (c = 0; c < NUM_CHANNELS; c++) {
  111. if (area)
  112. sum[c] /= area;
  113. td->out->grid[gy][gx][c] = sum[c];
  114. }
  115. }
  116. return 0;
  117. }
  118. static void convert_frame(AVFilterContext *ctx, AVFrame *in, PhotosensitivityFrame *out, int skip)
  119. {
  120. ThreadData_convert_frame td;
  121. td.in = in;
  122. td.out = out;
  123. td.skip = skip;
  124. ctx->internal->execute(ctx, convert_frame_partial, &td, NULL, FFMIN(NUM_CELLS, ff_filter_get_nb_threads(ctx)));
  125. }
/* Per-call context for blend_frame_partial(), shared by all slice jobs. */
typedef struct ThreadData_blend_frame
{
    AVFrame *target; // frame blended in place
    AVFrame *source; // frame mixed into target
    uint16_t s_mul;  // source weight in 8.8 fixed point (0..0x100)
} ThreadData_blend_frame;
  132. static int blend_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  133. {
  134. int x, y;
  135. uint8_t *t, *s;
  136. ThreadData_blend_frame *td = arg;
  137. const uint16_t s_mul = td->s_mul;
  138. const uint16_t t_mul = 0x100 - s_mul;
  139. const int slice_start = (td->target->height * jobnr) / nb_jobs;
  140. const int slice_end = (td->target->height * (jobnr+1)) / nb_jobs;
  141. const int linesize = td->target->linesize[0];
  142. for (y = slice_start; y < slice_end; y++) {
  143. t = td->target->data[0] + y * td->target->linesize[0];
  144. s = td->source->data[0] + y * td->source->linesize[0];
  145. for (x = 0; x < linesize; x++) {
  146. *t = (*t * t_mul + *s * s_mul) >> 8;
  147. t++; s++;
  148. }
  149. }
  150. return 0;
  151. }
  152. static void blend_frame(AVFilterContext *ctx, AVFrame *target, AVFrame *source, float factor)
  153. {
  154. ThreadData_blend_frame td;
  155. td.target = target;
  156. td.source = source;
  157. td.s_mul = (uint16_t)(factor * 0x100);
  158. ctx->internal->execute(ctx, blend_frame_partial, &td, NULL, FFMIN(ctx->outputs[0]->h, ff_filter_get_nb_threads(ctx)));
  159. }
  160. static int get_badness(PhotosensitivityFrame *a, PhotosensitivityFrame *b)
  161. {
  162. int badness, x, y, c;
  163. badness = 0;
  164. for (c = 0; c < NUM_CHANNELS; c++) {
  165. for (y = 0; y < GRID_SIZE; y++) {
  166. for (x = 0; x < GRID_SIZE; x++) {
  167. badness += abs((int)a->grid[y][x][c] - (int)b->grid[y][x][c]);
  168. //av_log(NULL, AV_LOG_VERBOSE, "%d - %d -> %d \n", a->grid[y][x], b->grid[y][x], badness);
  169. //av_log(NULL, AV_LOG_VERBOSE, "%d -> %d \n", abs((int)a->grid[y][x] - (int)b->grid[y][x]), badness);
  170. }
  171. }
  172. }
  173. return badness;
  174. }
  175. static int config_input(AVFilterLink *inlink)
  176. {
  177. /* const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); */
  178. AVFilterContext *ctx = inlink->dst;
  179. PhotosensitivityContext *s = ctx->priv;
  180. s->badness_threshold = (int)(GRID_SIZE * GRID_SIZE * 4 * 256 * s->nb_frames * s->threshold_multiplier / 128);
  181. return 0;
  182. }
/* Per-frame entry point.  Measures how much the incoming frame differs from
 * the last emitted frame; if the accumulated "badness" over the history
 * window exceeds the threshold, the flash is toned down by blending the new
 * frame into the previously emitted one (or the previous frame is duplicated
 * outright).  Takes ownership of `in`. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int this_badness, current_badness, fixed_badness, new_badness, i, res;
    PhotosensitivityFrame ef;
    AVFrame *src, *out;
    int free_in = 0;
    float factor;
    AVDictionary **metadata;
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    PhotosensitivityContext *s = ctx->priv;

    /* weighted moving average over the circular history buffer: the weight
     * `i` grows towards the most recently recorded entry */
    current_badness = 0;
    for (i = 1; i < s->nb_frames; i++)
        current_badness += i * s->history[(s->history_pos + i) % s->nb_frames];
    current_badness /= s->nb_frames;

    /* downsample the new frame and measure its change vs. the last output */
    convert_frame(ctx, in, &ef, s->skip);
    this_badness = get_badness(&ef, &s->last_frame_e);
    new_badness = current_badness + this_badness;
    av_log(s, AV_LOG_VERBOSE, "badness: %6d -> %6d / %6d (%3d%% - %s)\n",
        current_badness, new_badness, s->badness_threshold,
        100 * new_badness / s->badness_threshold, new_badness < s->badness_threshold ? "OK" : "EXCEEDED");

    fixed_badness = new_badness;
    if (new_badness < s->badness_threshold || !s->last_frame_av || s->bypass) {
        /* within threshold (or first frame, or bypass mode): pass the frame
         * through and keep a reference to it as the new last-emitted frame */
        factor = 1; /* for metadata */
        av_frame_free(&s->last_frame_av);
        s->last_frame_av = src = in;
        s->last_frame_e = ef;
        s->history[s->history_pos] = this_badness;
    } else {
        /* threshold exceeded: compute how much of the new frame we can mix
         * into the previous output without crossing the threshold */
        factor = (float)(s->badness_threshold - current_badness) / (new_badness - current_badness);
        if (factor <= 0) {
            /* just duplicate the frame */
            s->history[s->history_pos] = 0; /* frame was duplicated, thus, delta is zero */
        } else {
            res = av_frame_make_writable(s->last_frame_av);
            if (res) {
                av_frame_free(&in);
                return res;
            }
            blend_frame(ctx, s->last_frame_av, in, factor);

            /* re-measure the badness of the blended result and record it */
            convert_frame(ctx, s->last_frame_av, &ef, s->skip);
            this_badness = get_badness(&ef, &s->last_frame_e);
            fixed_badness = current_badness + this_badness;
            av_log(s, AV_LOG_VERBOSE, " fixed: %6d -> %6d / %6d (%3d%%) factor=%5.3f\n",
                current_badness, fixed_badness, s->badness_threshold,
                100 * new_badness / s->badness_threshold, factor);
            s->last_frame_e = ef;
            s->history[s->history_pos] = this_badness;
        }
        src = s->last_frame_av; /* emit the (possibly blended) previous frame */
        free_in = 1;            /* the input frame itself is not passed on */
    }
    s->history_pos = (s->history_pos + 1) % s->nb_frames;

    /* copy the chosen source frame into a fresh output buffer */
    out = ff_get_video_buffer(outlink, in->width, in->height);
    if (!out) {
        if (free_in == 1)
            av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    metadata = &out->metadata;
    if (metadata) {
        char value[128];

        /* export the measurements, normalized against the threshold */
        snprintf(value, sizeof(value), "%f", (float)new_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.badness", value, 0);

        snprintf(value, sizeof(value), "%f", (float)fixed_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.fixed-badness", value, 0);

        snprintf(value, sizeof(value), "%f", (float)this_badness / s->badness_threshold);
        av_dict_set(metadata, "lavfi.photosensitivity.frame-badness", value, 0);

        snprintf(value, sizeof(value), "%f", factor);
        av_dict_set(metadata, "lavfi.photosensitivity.factor", value, 0);
    }
    av_frame_copy(out, src);
    if (free_in == 1)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
  261. static av_cold void uninit(AVFilterContext *ctx)
  262. {
  263. PhotosensitivityContext *s = ctx->priv;
  264. av_frame_free(&s->last_frame_av);
  265. }
/* Single video input; frames are consumed by filter_frame() and the
 * threshold is derived from link properties in config_input(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single video output; no output-side configuration is needed. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/* Filter registration entry; no init callback — all setup happens via the
 * option system and config_input(). */
AVFilter ff_vf_photosensitivity = {
    .name          = "photosensitivity",
    .description   = NULL_IF_CONFIG_SMALL("Filter out photosensitive epilepsy seizure-inducing flashes."),
    .priv_size     = sizeof(PhotosensitivityContext),
    .priv_class    = &photosensitivity_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
};