You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

400 lines
13KB

  1. /*
  2. * Copyright (c) 2016 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/avassert.h"
  21. #include "libavutil/intreadwrite.h"
  22. #include "libavutil/opt.h"
  23. #include "libavutil/parseutils.h"
  24. #include "libavutil/pixdesc.h"
  25. #include "libavutil/xga_font_data.h"
  26. #include "avfilter.h"
  27. #include "drawutils.h"
  28. #include "formats.h"
  29. #include "internal.h"
  30. #include "video.h"
/* Private context for the datascope filter: turns a region of the input
 * video into a grid of per-pixel component values printed in hexadecimal. */
typedef struct DatascopeContext {
    const AVClass *class;   // required first member for the AVOptions system
    int ow, oh;             // output width/height; both written by the "size" option
    int x, y;               // top-left corner of the sampled input region
    int mode;               // 0 = mono, 1 = color, 2 = color2 (see options table)
    int axis;               // non-zero: draw row/column number labels
    float opacity;          // background opacity, 0..1

    int nb_planes;          // plane count of the input pixel format
    int nb_comps;           // component count of the input pixel format
    int chars;              // hex digits per component value: 2 (8-bit) or 4 (>8-bit)
    FFDrawContext draw;     // drawing helper state for the input format
    FFDrawColor yellow;     // axis label text
    FFDrawColor white;      // value text in "mono" mode
    FFDrawColor black;      // background fill (alpha taken from opacity)
    FFDrawColor gray;       // axis label background
    // Slice worker chosen in config_input() according to "mode".
    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} DatascopeContext;
#define OFFSET(x) offsetof(DatascopeContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption datascope_options[] = {
    /* AV_OPT_TYPE_IMAGE_SIZE writes two consecutive ints (width, height),
     * so OFFSET(ow) fills both ow and oh. */
    { "size", "set output size", OFFSET(ow), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
    { "s",    "set output size", OFFSET(ow), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
    { "x", "set x offset", OFFSET(x), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { "y", "set y offset", OFFSET(y), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    /* mode constants: values 0..2 select the slice worker in config_input() */
    { "mode", "set scope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "mode" },
    {   "mono",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
    {   "color",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
    {   "color2", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mode" },
    { "axis", "draw column/row numbers", OFFSET(axis), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "opacity", "set background opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(datascope);
/* Accept exactly the pixel formats the drawutils helpers can render into. */
static int query_formats(AVFilterContext *ctx)
{
    return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
}
  68. static void draw_text(DatascopeContext *s, AVFrame *frame, FFDrawColor *color,
  69. int x0, int y0, const uint8_t *text, int vertical)
  70. {
  71. int x = x0;
  72. for (; *text; text++) {
  73. if (*text == '\n') {
  74. x = x0;
  75. y0 += 8;
  76. continue;
  77. }
  78. ff_blend_mask(&s->draw, color, frame->data, frame->linesize,
  79. frame->width, frame->height,
  80. avpriv_cga_font + *text * 8, 1, 8, 8, 0, 0, x, y0);
  81. if (vertical) {
  82. x = x0;
  83. y0 += 8;
  84. } else {
  85. x += 8;
  86. }
  87. }
  88. }
/*
 * Read the component values of input pixel (x, y) into value[] and mirror
 * them into *color so the pixel can be redrawn in its own color.
 * Packed single-plane formats read all components from plane 0 using the
 * pixel step; planar formats read one sample per plane with chroma
 * subsampling applied.  value[] must have room for 4 ints.
 */
static void pick_color(FFDrawContext *draw, FFDrawColor *color, AVFrame *in, int x, int y, int *value)
{
    int p, i;

    color->rgba[3] = 255;   /* always draw fully opaque */
    for (p = 0; p < draw->nb_planes; p++) {
        if (draw->desc->comp[p].depth == 8) {
            if (draw->nb_planes == 1) {
                /* packed 8-bit: components interleaved within plane 0 */
                for (i = 0; i < 4; i++) {
                    value[i] = in->data[0][y * in->linesize[0] + x * draw->pixelstep[0] + i];
                    color->comp[0].u8[i] = value[i];
                }
            } else {
                /* planar 8-bit: one sample per plane, subsampled for chroma */
                value[p] = in->data[p][(y >> draw->vsub[p]) * in->linesize[p] + (x >> draw->hsub[p])];
                color->comp[p].u8[0] = value[p];
            }
        } else {
            if (draw->nb_planes == 1) {
                /* packed >8-bit: little-endian 16-bit components in plane 0 */
                for (i = 0; i < 4; i++) {
                    value[i] = AV_RL16(in->data[0] + y * in->linesize[0] + x * draw->pixelstep[0] + i * 2);
                    color->comp[0].u16[i] = value[i];
                }
            } else {
                /* planar >8-bit: one little-endian 16-bit sample per plane */
                value[p] = AV_RL16(in->data[p] + (y >> draw->vsub[p]) * in->linesize[p] + (x >> draw->hsub[p]) * 2);
                color->comp[p].u16[0] = value[p];
            }
        }
    }
}
  117. static void reverse_color(FFDrawContext *draw, FFDrawColor *color, FFDrawColor *reverse)
  118. {
  119. int p;
  120. reverse->rgba[3] = 255;
  121. for (p = 0; p < draw->nb_planes; p++) {
  122. if (draw->desc->comp[p].depth == 8) {
  123. reverse->comp[p].u8[0] = color->comp[p].u8[0] > 127 ? 0 : 255;
  124. reverse->comp[p].u8[1] = color->comp[p].u8[1] > 127 ? 0 : 255;
  125. reverse->comp[p].u8[2] = color->comp[p].u8[2] > 127 ? 0 : 255;
  126. } else {
  127. const unsigned max = (1 << draw->desc->comp[p].depth) - 1;
  128. const unsigned mid = (max + 1) / 2;
  129. reverse->comp[p].u16[0] = color->comp[p].u16[0] > mid ? 0 : max;
  130. reverse->comp[p].u16[1] = color->comp[p].u16[1] > mid ? 0 : max;
  131. reverse->comp[p].u16[2] = color->comp[p].u16[2] > mid ? 0 : max;
  132. }
  133. }
  134. }
/* Per-frame job context handed to the slice workers through execute(). */
typedef struct ThreadData {
    AVFrame *in, *out;   // source frame and output canvas
    int xoff, yoff;      // left/top margins reserved for axis labels
} ThreadData;
/*
 * Slice worker for mode=2 ("color2"): every value cell is filled with the
 * source pixel's own color and the hex digits are drawn in the contrasting
 * color from reverse_color().  Work is sliced over grid columns.
 */
static int filter_color2(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    DatascopeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterLink *inlink = ctx->inputs[0];
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int xoff = td->xoff;                       /* left axis margin */
    const int yoff = td->yoff;                       /* top axis margin */
    const int P = FFMAX(s->nb_planes, s->nb_comps);  /* values per pixel */
    const int C = s->chars;                          /* hex digits per value */
    const int W = (outlink->w - xoff) / (C * 10);    /* grid columns (cell: C*10 px wide) */
    const int H = (outlink->h - yoff) / (P * 12);    /* grid rows (cell: P*12 px tall) */
    const char *format[2] = {"%02X\n", "%04X\n"};    /* indexed by C>>2 (C is 2 or 4) */
    const int slice_start = (W * jobnr) / nb_jobs;
    const int slice_end = (W * (jobnr+1)) / nb_jobs;
    int x, y, p;

    for (y = 0; y < H && (y + s->y < inlink->h); y++) {
        for (x = slice_start; x < slice_end && (x + s->x < inlink->w); x++) {
            FFDrawColor color = { { 0 } };
            FFDrawColor reverse = { { 0 } };
            int value[4] = { 0 };

            pick_color(&s->draw, &color, in, x + s->x, y + s->y, value);
            reverse_color(&s->draw, &color, &reverse);
            /* paint the whole cell with the pixel's own color ... */
            ff_fill_rectangle(&s->draw, &color, out->data, out->linesize,
                              xoff + x * C * 10, yoff + y * P * 12, C * 10, P * 12);
            /* ... then print one component value per line, in reverse video */
            for (p = 0; p < P; p++) {
                char text[256];

                snprintf(text, sizeof(text), format[C>>2], value[p]);
                draw_text(s, out, &reverse, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0);
            }
        }
    }
    return 0;
}
/*
 * Slice worker for mode=1 ("color"): hex digits are drawn directly in the
 * source pixel's own color on the shared background.  Work is sliced over
 * grid columns.
 */
static int filter_color(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    DatascopeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterLink *inlink = ctx->inputs[0];
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int xoff = td->xoff;                       /* left axis margin */
    const int yoff = td->yoff;                       /* top axis margin */
    const int P = FFMAX(s->nb_planes, s->nb_comps);  /* values per pixel */
    const int C = s->chars;                          /* hex digits per value */
    const int W = (outlink->w - xoff) / (C * 10);    /* grid columns */
    const int H = (outlink->h - yoff) / (P * 12);    /* grid rows */
    const char *format[2] = {"%02X\n", "%04X\n"};    /* indexed by C>>2 (C is 2 or 4) */
    const int slice_start = (W * jobnr) / nb_jobs;
    const int slice_end = (W * (jobnr+1)) / nb_jobs;
    int x, y, p;

    for (y = 0; y < H && (y + s->y < inlink->h); y++) {
        for (x = slice_start; x < slice_end && (x + s->x < inlink->w); x++) {
            FFDrawColor color = { { 0 } };
            int value[4] = { 0 };

            pick_color(&s->draw, &color, in, x + s->x, y + s->y, value);
            for (p = 0; p < P; p++) {
                char text[256];

                snprintf(text, sizeof(text), format[C>>2], value[p]);
                draw_text(s, out, &color, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0);
            }
        }
    }
    return 0;
}
/*
 * Slice worker for mode=0 ("mono"): hex digits are drawn in plain white
 * regardless of the source pixel's color.  Work is sliced over grid
 * columns.
 */
static int filter_mono(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    DatascopeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterLink *inlink = ctx->inputs[0];
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int xoff = td->xoff;                       /* left axis margin */
    const int yoff = td->yoff;                       /* top axis margin */
    const int P = FFMAX(s->nb_planes, s->nb_comps);  /* values per pixel */
    const int C = s->chars;                          /* hex digits per value */
    const int W = (outlink->w - xoff) / (C * 10);    /* grid columns */
    const int H = (outlink->h - yoff) / (P * 12);    /* grid rows */
    const char *format[2] = {"%02X\n", "%04X\n"};    /* indexed by C>>2 (C is 2 or 4) */
    const int slice_start = (W * jobnr) / nb_jobs;
    const int slice_end = (W * (jobnr+1)) / nb_jobs;
    int x, y, p;

    for (y = 0; y < H && (y + s->y < inlink->h); y++) {
        for (x = slice_start; x < slice_end && (x + s->x < inlink->w); x++) {
            FFDrawColor color = { { 0 } };
            int value[4] = { 0 };

            pick_color(&s->draw, &color, in, x + s->x, y + s->y, value);
            for (p = 0; p < P; p++) {
                char text[256];

                snprintf(text, sizeof(text), format[C>>2], value[p]);
                draw_text(s, out, &s->white, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0);
            }
        }
    }
    return 0;
}
/*
 * Per-frame entry point: allocate the output canvas, fill the background,
 * optionally draw the row/column axis labels, then run the mode-specific
 * slice worker over the value grid.  Takes ownership of |in|.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DatascopeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td = { 0 };
    int ymaxlen = 0;   /* pixel width of the left (row-number) margin */
    int xmaxlen = 0;   /* pixel height of the top (column-number) margin */
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    out->pts = in->pts;

    /* background, with user-configurable opacity (s->black carries alpha) */
    ff_fill_rectangle(&s->draw, &s->black, out->data, out->linesize,
                      0, 0, outlink->w, outlink->h);

    if (s->axis) {
        const int P = FFMAX(s->nb_planes, s->nb_comps);
        const int C = s->chars;
        int Y = outlink->h / (P * 12);
        int X = outlink->w / (C * 10);
        char text[256] = { 0 };
        int x, y;

        /* size the margins from the largest label value, 10 px per digit */
        snprintf(text, sizeof(text), "%d", s->y + Y);
        ymaxlen = strlen(text);
        ymaxlen *= 10;
        snprintf(text, sizeof(text), "%d", s->x + X);
        xmaxlen = strlen(text);
        xmaxlen *= 10;

        /* recompute the visible grid now that the margins eat into it */
        Y = (outlink->h - xmaxlen) / (P * 12);
        X = (outlink->w - ymaxlen) / (C * 10);

        /* row numbers down the left edge */
        for (y = 0; y < Y; y++) {
            snprintf(text, sizeof(text), "%d", s->y + y);

            ff_fill_rectangle(&s->draw, &s->gray, out->data, out->linesize,
                              0, xmaxlen + y * P * 12 + (P + 1) * P - 2, ymaxlen, 10);

            draw_text(s, out, &s->yellow, 2, xmaxlen + y * P * 12 + (P + 1) * P, text, 0);
        }

        /* column numbers along the top edge, drawn vertically */
        for (x = 0; x < X; x++) {
            snprintf(text, sizeof(text), "%d", s->x + x);

            ff_fill_rectangle(&s->draw, &s->gray, out->data, out->linesize,
                              ymaxlen + x * C * 10 + 2 * C - 2, 0, 10, xmaxlen);

            draw_text(s, out, &s->yellow, ymaxlen + x * C * 10 + 2 * C, 2, text, 1);
        }
    }

    /* NOTE: the cross-assignment is intentional — the left margin width
     * (ymaxlen) becomes the worker's x offset and the top margin height
     * (xmaxlen) its y offset. */
    td.in = in; td.out = out, td.yoff = xmaxlen, td.xoff = ymaxlen;
    /* cap thread count so each slice covers at least ~20 output columns */
    ctx->internal->execute(ctx, s->filter, &td, NULL, FFMIN(ff_filter_get_nb_threads(ctx), FFMAX(outlink->w / 20, 1)));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
  289. static int config_input(AVFilterLink *inlink)
  290. {
  291. DatascopeContext *s = inlink->dst->priv;
  292. uint8_t alpha = s->opacity * 255;
  293. s->nb_planes = av_pix_fmt_count_planes(inlink->format);
  294. ff_draw_init(&s->draw, inlink->format, 0);
  295. ff_draw_color(&s->draw, &s->white, (uint8_t[]){ 255, 255, 255, 255} );
  296. ff_draw_color(&s->draw, &s->black, (uint8_t[]){ 0, 0, 0, alpha} );
  297. ff_draw_color(&s->draw, &s->yellow, (uint8_t[]){ 255, 255, 0, 255} );
  298. ff_draw_color(&s->draw, &s->gray, (uint8_t[]){ 77, 77, 77, 255} );
  299. s->chars = (s->draw.desc->comp[0].depth + 7) / 8 * 2;
  300. s->nb_comps = s->draw.desc->nb_components;
  301. switch (s->mode) {
  302. case 0: s->filter = filter_mono; break;
  303. case 1: s->filter = filter_color; break;
  304. case 2: s->filter = filter_color2; break;
  305. }
  306. return 0;
  307. }
  308. static int config_output(AVFilterLink *outlink)
  309. {
  310. DatascopeContext *s = outlink->src->priv;
  311. outlink->h = s->oh;
  312. outlink->w = s->ow;
  313. outlink->sample_aspect_ratio = (AVRational){1,1};
  314. return 0;
  315. }
/* Single video input: frames are consumed by filter_frame(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single video output: its dimensions are set in config_output(). */
static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};
/* Filter registration: slice-threaded, options defined in datascope_options. */
AVFilter ff_vf_datascope = {
    .name          = "datascope",
    .description   = NULL_IF_CONFIG_SMALL("Video data analysis."),
    .priv_size     = sizeof(DatascopeContext),
    .priv_class    = &datascope_class,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};