You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

446 lines
16KB

  1. /*
  2. * Copyright (c) 2008 vmrsss
  3. * Copyright (c) 2009 Stefano Sabatini
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * video padding filter
  24. */
  25. #include "avfilter.h"
  26. #include "formats.h"
  27. #include "internal.h"
  28. #include "video.h"
  29. #include "libavutil/avstring.h"
  30. #include "libavutil/common.h"
  31. #include "libavutil/eval.h"
  32. #include "libavutil/pixdesc.h"
  33. #include "libavutil/colorspace.h"
  34. #include "libavutil/imgutils.h"
  35. #include "libavutil/parseutils.h"
  36. #include "libavutil/mathematics.h"
  37. #include "libavutil/opt.h"
  38. #include "drawutils.h"
  39. static const char *const var_names[] = {
  40. "in_w", "iw",
  41. "in_h", "ih",
  42. "out_w", "ow",
  43. "out_h", "oh",
  44. "x",
  45. "y",
  46. "a",
  47. "sar",
  48. "dar",
  49. "hsub",
  50. "vsub",
  51. NULL
  52. };
  53. enum var_name {
  54. VAR_IN_W, VAR_IW,
  55. VAR_IN_H, VAR_IH,
  56. VAR_OUT_W, VAR_OW,
  57. VAR_OUT_H, VAR_OH,
  58. VAR_X,
  59. VAR_Y,
  60. VAR_A,
  61. VAR_SAR,
  62. VAR_DAR,
  63. VAR_HSUB,
  64. VAR_VSUB,
  65. VARS_NB
  66. };
  67. static int query_formats(AVFilterContext *ctx)
  68. {
  69. return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
  70. }
  71. enum EvalMode {
  72. EVAL_MODE_INIT,
  73. EVAL_MODE_FRAME,
  74. EVAL_MODE_NB
  75. };
  76. typedef struct PadContext {
  77. const AVClass *class;
  78. int w, h; ///< output dimensions, a value of 0 will result in the input size
  79. int x, y; ///< offsets of the input area with respect to the padded area
  80. int in_w, in_h; ///< width and height for the padded input video, which has to be aligned to the chroma values in order to avoid chroma issues
  81. int inlink_w, inlink_h;
  82. char *w_expr; ///< width expression string
  83. char *h_expr; ///< height expression string
  84. char *x_expr; ///< width expression string
  85. char *y_expr; ///< height expression string
  86. uint8_t rgba_color[4]; ///< color for the padding area
  87. FFDrawContext draw;
  88. FFDrawColor color;
  89. int eval_mode; ///< expression evaluation mode
  90. } PadContext;
  91. static int config_input(AVFilterLink *inlink)
  92. {
  93. AVFilterContext *ctx = inlink->dst;
  94. PadContext *s = ctx->priv;
  95. int ret;
  96. double var_values[VARS_NB], res;
  97. char *expr;
  98. ff_draw_init(&s->draw, inlink->format, 0);
  99. ff_draw_color(&s->draw, &s->color, s->rgba_color);
  100. var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
  101. var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
  102. var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
  103. var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
  104. var_values[VAR_A] = (double) inlink->w / inlink->h;
  105. var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
  106. (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
  107. var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
  108. var_values[VAR_HSUB] = 1 << s->draw.hsub_max;
  109. var_values[VAR_VSUB] = 1 << s->draw.vsub_max;
  110. /* evaluate width and height */
  111. av_expr_parse_and_eval(&res, (expr = s->w_expr),
  112. var_names, var_values,
  113. NULL, NULL, NULL, NULL, NULL, 0, ctx);
  114. s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
  115. if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
  116. var_names, var_values,
  117. NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
  118. goto eval_fail;
  119. s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
  120. if (!s->h)
  121. var_values[VAR_OUT_H] = var_values[VAR_OH] = s->h = inlink->h;
  122. /* evaluate the width again, as it may depend on the evaluated output height */
  123. if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
  124. var_names, var_values,
  125. NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
  126. goto eval_fail;
  127. s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
  128. if (!s->w)
  129. var_values[VAR_OUT_W] = var_values[VAR_OW] = s->w = inlink->w;
  130. /* evaluate x and y */
  131. av_expr_parse_and_eval(&res, (expr = s->x_expr),
  132. var_names, var_values,
  133. NULL, NULL, NULL, NULL, NULL, 0, ctx);
  134. s->x = var_values[VAR_X] = res;
  135. if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr),
  136. var_names, var_values,
  137. NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
  138. goto eval_fail;
  139. s->y = var_values[VAR_Y] = res;
  140. /* evaluate x again, as it may depend on the evaluated y value */
  141. if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr),
  142. var_names, var_values,
  143. NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
  144. goto eval_fail;
  145. s->x = var_values[VAR_X] = res;
  146. /* sanity check params */
  147. if (s->w < 0 || s->h < 0 || s->x < 0 || s->y < 0) {
  148. av_log(ctx, AV_LOG_ERROR, "Negative values are not acceptable.\n");
  149. return AVERROR(EINVAL);
  150. }
  151. s->w = ff_draw_round_to_sub(&s->draw, 0, -1, s->w);
  152. s->h = ff_draw_round_to_sub(&s->draw, 1, -1, s->h);
  153. s->x = ff_draw_round_to_sub(&s->draw, 0, -1, s->x);
  154. s->y = ff_draw_round_to_sub(&s->draw, 1, -1, s->y);
  155. s->in_w = ff_draw_round_to_sub(&s->draw, 0, -1, inlink->w);
  156. s->in_h = ff_draw_round_to_sub(&s->draw, 1, -1, inlink->h);
  157. s->inlink_w = inlink->w;
  158. s->inlink_h = inlink->h;
  159. av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X\n",
  160. inlink->w, inlink->h, s->w, s->h, s->x, s->y,
  161. s->rgba_color[0], s->rgba_color[1], s->rgba_color[2], s->rgba_color[3]);
  162. if (s->x < 0 || s->y < 0 ||
  163. s->w <= 0 || s->h <= 0 ||
  164. (unsigned)s->x + (unsigned)inlink->w > s->w ||
  165. (unsigned)s->y + (unsigned)inlink->h > s->h) {
  166. av_log(ctx, AV_LOG_ERROR,
  167. "Input area %d:%d:%d:%d not within the padded area 0:0:%d:%d or zero-sized\n",
  168. s->x, s->y, s->x + inlink->w, s->y + inlink->h, s->w, s->h);
  169. return AVERROR(EINVAL);
  170. }
  171. return 0;
  172. eval_fail:
  173. av_log(NULL, AV_LOG_ERROR,
  174. "Error when evaluating the expression '%s'\n", expr);
  175. return ret;
  176. }
  177. static int config_output(AVFilterLink *outlink)
  178. {
  179. PadContext *s = outlink->src->priv;
  180. outlink->w = s->w;
  181. outlink->h = s->h;
  182. return 0;
  183. }
  184. static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
  185. {
  186. PadContext *s = inlink->dst->priv;
  187. AVFrame *frame;
  188. int plane;
  189. if (s->inlink_w <= 0)
  190. return NULL;
  191. frame = ff_get_video_buffer(inlink->dst->outputs[0],
  192. w + (s->w - s->in_w),
  193. h + (s->h - s->in_h) + (s->x > 0));
  194. if (!frame)
  195. return NULL;
  196. frame->width = w;
  197. frame->height = h;
  198. for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
  199. int hsub = s->draw.hsub[plane];
  200. int vsub = s->draw.vsub[plane];
  201. frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +
  202. (s->y >> vsub) * frame->linesize[plane];
  203. }
  204. return frame;
  205. }
  206. /* check whether each plane in this buffer can be padded without copying */
  207. static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf)
  208. {
  209. int planes[4] = { -1, -1, -1, -1}, *p = planes;
  210. int i, j;
  211. /* get all planes in this buffer */
  212. for (i = 0; i < FF_ARRAY_ELEMS(planes) && frame->data[i]; i++) {
  213. if (av_frame_get_plane_buffer(frame, i) == buf)
  214. *p++ = i;
  215. }
  216. /* for each plane in this buffer, check that it can be padded without
  217. * going over buffer bounds or other planes */
  218. for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) {
  219. int hsub = s->draw.hsub[planes[i]];
  220. int vsub = s->draw.vsub[planes[i]];
  221. uint8_t *start = frame->data[planes[i]];
  222. uint8_t *end = start + (frame->height >> vsub) *
  223. frame->linesize[planes[i]];
  224. /* amount of free space needed before the start and after the end
  225. * of the plane */
  226. ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] +
  227. (s->y >> vsub) * frame->linesize[planes[i]];
  228. ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) *
  229. s->draw.pixelstep[planes[i]] +
  230. ((s->h - s->y - frame->height) >> vsub) * frame->linesize[planes[i]];
  231. if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]])
  232. return 1;
  233. if (start - buf->data < req_start ||
  234. (buf->data + buf->size) - end < req_end)
  235. return 1;
  236. for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) {
  237. int vsub1 = s->draw.vsub[planes[j]];
  238. uint8_t *start1 = frame->data[planes[j]];
  239. uint8_t *end1 = start1 + (frame->height >> vsub1) *
  240. frame->linesize[planes[j]];
  241. if (i == j)
  242. continue;
  243. if (FFSIGN(start - end1) != FFSIGN(start - end1 - req_start) ||
  244. FFSIGN(end - start1) != FFSIGN(end - start1 + req_end))
  245. return 1;
  246. }
  247. }
  248. return 0;
  249. }
  250. static int frame_needs_copy(PadContext *s, AVFrame *frame)
  251. {
  252. int i;
  253. if (!av_frame_is_writable(frame))
  254. return 1;
  255. for (i = 0; i < 4 && frame->buf[i]; i++)
  256. if (buffer_needs_copy(s, frame, frame->buf[i]))
  257. return 1;
  258. return 0;
  259. }
  260. static int filter_frame(AVFilterLink *inlink, AVFrame *in)
  261. {
  262. PadContext *s = inlink->dst->priv;
  263. AVFilterLink *outlink = inlink->dst->outputs[0];
  264. AVFrame *out;
  265. int needs_copy;
  266. if(s->eval_mode == EVAL_MODE_FRAME && (
  267. in->width != s->inlink_w
  268. || in->height != s->inlink_h
  269. || in->format != outlink->format
  270. || in->sample_aspect_ratio.den != outlink->sample_aspect_ratio.den || in->sample_aspect_ratio.num != outlink->sample_aspect_ratio.num)) {
  271. int ret;
  272. inlink->dst->inputs[0]->format = in->format;
  273. inlink->dst->inputs[0]->w = in->width;
  274. inlink->dst->inputs[0]->h = in->height;
  275. inlink->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
  276. inlink->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
  277. if ((ret = config_input(inlink)) < 0) {
  278. s->inlink_w = -1;
  279. return ret;
  280. }
  281. if ((ret = config_output(outlink)) < 0) {
  282. s->inlink_w = -1;
  283. return ret;
  284. }
  285. }
  286. needs_copy = frame_needs_copy(s, in);
  287. if (needs_copy) {
  288. av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n");
  289. out = ff_get_video_buffer(inlink->dst->outputs[0],
  290. FFMAX(inlink->w, s->w),
  291. FFMAX(inlink->h, s->h));
  292. if (!out) {
  293. av_frame_free(&in);
  294. return AVERROR(ENOMEM);
  295. }
  296. av_frame_copy_props(out, in);
  297. } else {
  298. int i;
  299. out = in;
  300. for (i = 0; i < 4 && out->data[i] && out->linesize[i]; i++) {
  301. int hsub = s->draw.hsub[i];
  302. int vsub = s->draw.vsub[i];
  303. out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +
  304. (s->y >> vsub) * out->linesize[i];
  305. }
  306. }
  307. /* top bar */
  308. if (s->y) {
  309. ff_fill_rectangle(&s->draw, &s->color,
  310. out->data, out->linesize,
  311. 0, 0, s->w, s->y);
  312. }
  313. /* bottom bar */
  314. if (s->h > s->y + s->in_h) {
  315. ff_fill_rectangle(&s->draw, &s->color,
  316. out->data, out->linesize,
  317. 0, s->y + s->in_h, s->w, s->h - s->y - s->in_h);
  318. }
  319. /* left border */
  320. ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
  321. 0, s->y, s->x, in->height);
  322. if (needs_copy) {
  323. ff_copy_rectangle2(&s->draw,
  324. out->data, out->linesize, in->data, in->linesize,
  325. s->x, s->y, 0, 0, in->width, in->height);
  326. }
  327. /* right border */
  328. ff_fill_rectangle(&s->draw, &s->color, out->data, out->linesize,
  329. s->x + s->in_w, s->y, s->w - s->x - s->in_w,
  330. in->height);
  331. out->width = s->w;
  332. out->height = s->h;
  333. if (in != out)
  334. av_frame_free(&in);
  335. return ff_filter_frame(inlink->dst->outputs[0], out);
  336. }
  337. #define OFFSET(x) offsetof(PadContext, x)
  338. #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
  339. static const AVOption pad_options[] = {
  340. { "width", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
  341. { "w", "set the pad area width expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS },
  342. { "height", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
  343. { "h", "set the pad area height expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS },
  344. { "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
  345. { "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
  346. { "color", "set the color of the padded area border", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
  347. { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
  348. { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" },
  349. { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
  350. { NULL }
  351. };
  352. AVFILTER_DEFINE_CLASS(pad);
  353. static const AVFilterPad avfilter_vf_pad_inputs[] = {
  354. {
  355. .name = "default",
  356. .type = AVMEDIA_TYPE_VIDEO,
  357. .config_props = config_input,
  358. .get_video_buffer = get_video_buffer,
  359. .filter_frame = filter_frame,
  360. },
  361. { NULL }
  362. };
  363. static const AVFilterPad avfilter_vf_pad_outputs[] = {
  364. {
  365. .name = "default",
  366. .type = AVMEDIA_TYPE_VIDEO,
  367. .config_props = config_output,
  368. },
  369. { NULL }
  370. };
  371. AVFilter ff_vf_pad = {
  372. .name = "pad",
  373. .description = NULL_IF_CONFIG_SMALL("Pad the input video."),
  374. .priv_size = sizeof(PadContext),
  375. .priv_class = &pad_class,
  376. .query_formats = query_formats,
  377. .inputs = avfilter_vf_pad_inputs,
  378. .outputs = avfilter_vf_pad_outputs,
  379. };