/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter
 */

#include <stdio.h>
#include <string.h>

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "scale_eval.h"
#include "video.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "libswscale/swscale.h"
static const char *const var_names[] = {
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    "ohsub",
    "ovsub",
    "n",
    "t",
    "pos",
    "main_w",
    "main_h",
    "main_a",
    "main_sar",
    "main_dar", "mdar",
    "main_hsub",
    "main_vsub",
    "main_n",
    "main_t",
    "main_pos",
    NULL
};
enum var_name {
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VAR_OHSUB,
    VAR_OVSUB,
    VAR_N,
    VAR_T,
    VAR_POS,
    VAR_S2R_MAIN_W,
    VAR_S2R_MAIN_H,
    VAR_S2R_MAIN_A,
    VAR_S2R_MAIN_SAR,
    VAR_S2R_MAIN_DAR, VAR_S2R_MDAR,
    VAR_S2R_MAIN_HSUB,
    VAR_S2R_MAIN_VSUB,
    VAR_S2R_MAIN_N,
    VAR_S2R_MAIN_T,
    VAR_S2R_MAIN_POS,
    VARS_NB
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};
typedef struct ScaleContext {
    const AVClass *class;
    struct SwsContext *sws;     ///< software scaler context
    struct SwsContext *isws[2]; ///< software scaler context for interlaced material
    AVDictionary *opts;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     *  -N = try to keep aspect but make sure it is divisible by N
     */
    int w, h;
    char *size_str;
    unsigned int flags;         ///< sws flags
    double param[2];            ///< sws params

    int hsub, vsub;             ///< chroma subsampling
    int slice_y;                ///< top of current output slice
    int input_is_pal;           ///< set to 1 if the input format is paletted
    int output_is_pal;          ///< set to 1 if the output format is paletted
    int interlaced;

    char *w_expr;               ///< width expression string
    char *h_expr;               ///< height expression string
    AVExpr *w_pexpr;
    AVExpr *h_pexpr;
    double var_values[VARS_NB];

    char *flags_str;

    char *in_color_matrix;
    char *out_color_matrix;

    int in_range;
    int out_range;

    int out_h_chr_pos;
    int out_v_chr_pos;
    int in_h_chr_pos;
    int in_v_chr_pos;

    int force_original_aspect_ratio;
    int force_divisible_by;

    int nb_slices;

    int eval_mode;              ///< expression evaluation mode
} ScaleContext;
AVFilter ff_vf_scale2ref;

static int config_props(AVFilterLink *outlink);
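
/* Reject expression combinations that can never be evaluated: self-referencing
 * width/height, scale2ref-only variables used outside scale2ref, and per-frame
 * variables ('n', 't', 'pos') when expressions are only evaluated at init. */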
static int check_exprs(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };

    if (!scale->w_pexpr && !scale->h_pexpr)
        return AVERROR(EINVAL);

    if (scale->w_pexpr)
        av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
    if (scale->h_pexpr)
        av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);

    if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
        av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
        return AVERROR(EINVAL);
    }

    if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
        av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
        return AVERROR(EINVAL);
    }

    if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
        (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
        av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
    }

    if (ctx->filter != &ff_vf_scale2ref &&
        (vars_w[VAR_S2R_MAIN_W]    || vars_h[VAR_S2R_MAIN_W]    ||
         vars_w[VAR_S2R_MAIN_H]    || vars_h[VAR_S2R_MAIN_H]    ||
         vars_w[VAR_S2R_MAIN_A]    || vars_h[VAR_S2R_MAIN_A]    ||
         vars_w[VAR_S2R_MAIN_SAR]  || vars_h[VAR_S2R_MAIN_SAR]  ||
         vars_w[VAR_S2R_MAIN_DAR]  || vars_h[VAR_S2R_MAIN_DAR]  ||
         vars_w[VAR_S2R_MDAR]      || vars_h[VAR_S2R_MDAR]      ||
         vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] ||
         vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] ||
         vars_w[VAR_S2R_MAIN_N]    || vars_h[VAR_S2R_MAIN_N]    ||
         vars_w[VAR_S2R_MAIN_T]    || vars_h[VAR_S2R_MAIN_T]    ||
         vars_w[VAR_S2R_MAIN_POS]  || vars_h[VAR_S2R_MAIN_POS])) {
        av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n");
        return AVERROR(EINVAL);
    }

    if (scale->eval_mode == EVAL_MODE_INIT &&
        (vars_w[VAR_N]            || vars_h[VAR_N]            ||
         vars_w[VAR_T]            || vars_h[VAR_T]            ||
         vars_w[VAR_POS]          || vars_h[VAR_POS]          ||
         vars_w[VAR_S2R_MAIN_N]   || vars_h[VAR_S2R_MAIN_N]   ||
         vars_w[VAR_S2R_MAIN_T]   || vars_h[VAR_S2R_MAIN_T]   ||
         vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS])) {
        av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}
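
/* (Re)parse one dimension expression. The previous expression string and
 * parsed AVExpr are kept so they can be restored if parsing or validation
 * of the new expression fails. */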
static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args)
{
    ScaleContext *scale = ctx->priv;
    int ret, is_inited = 0;
    char *old_str_expr = NULL;
    AVExpr *old_pexpr = NULL;

    if (str_expr) {
        old_str_expr = av_strdup(str_expr);
        if (!old_str_expr)
            return AVERROR(ENOMEM);
        av_opt_set(scale, var, args, 0);
    }

    if (*pexpr_ptr) {
        old_pexpr = *pexpr_ptr;
        *pexpr_ptr = NULL;
        is_inited = 1;
    }

    ret = av_expr_parse(pexpr_ptr, args, var_names,
                        NULL, NULL, NULL, NULL, 0, ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args);
        goto revert;
    }

    ret = check_exprs(ctx);
    if (ret < 0)
        goto revert;

    if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
        goto revert;

    av_expr_free(old_pexpr);
    old_pexpr = NULL;
    av_freep(&old_str_expr);

    return 0;

revert:
    av_expr_free(*pexpr_ptr);
    *pexpr_ptr = NULL;
    if (old_str_expr) {
        av_opt_set(scale, var, old_str_expr, 0);
        av_free(old_str_expr);
    }
    if (old_pexpr)
        *pexpr_ptr = old_pexpr;

    return ret;
}
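
/* Filter init: normalize the size/w/h options into width and height
 * expressions, parse them, and translate the flags string into sws flags. */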
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    if (scale->size_str && (scale->w_expr || scale->h_expr)) {
        av_log(ctx, AV_LOG_ERROR,
               "Size and width/height expressions cannot be set at the same time.\n");
        return AVERROR(EINVAL);
    }

    if (scale->w_expr && !scale->h_expr)
        FFSWAP(char *, scale->w_expr, scale->size_str);

    if (scale->size_str) {
        char buf[32];
        if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid size '%s'\n", scale->size_str);
            return ret;
        }
        snprintf(buf, sizeof(buf)-1, "%d", scale->w);
        av_opt_set(scale, "w", buf, 0);
        snprintf(buf, sizeof(buf)-1, "%d", scale->h);
        av_opt_set(scale, "h", buf, 0);
    }
    if (!scale->w_expr)
        av_opt_set(scale, "w", "iw", 0);
    if (!scale->h_expr)
        av_opt_set(scale, "h", "ih", 0);

    ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
    if (ret < 0)
        return ret;

    ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
    if (ret < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
           scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);

    scale->flags = 0;

    if (scale->flags_str) {
        const AVClass *class = sws_get_class();
        const AVOption    *o = av_opt_find(&class, "sws_flags", NULL, 0,
                                           AV_OPT_SEARCH_FAKE_OBJ);
        int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
        if (ret < 0)
            return ret;
    }
    scale->opts = *opts;
    *opts = NULL;

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    av_expr_free(scale->w_pexpr);
    av_expr_free(scale->h_pexpr);
    scale->w_pexpr = scale->h_pexpr = NULL;
    sws_freeContext(scale->sws);
    sws_freeContext(scale->isws[0]);
    sws_freeContext(scale->isws[1]);
    scale->sws = NULL;
    av_dict_free(&scale->opts);
}
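
/* Advertise every pixel format that swscale can read on the input side and
 * write on the output side (plus PAL8, which is handled specially). */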
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    enum AVPixelFormat pix_fmt;
    int ret;

    if (ctx->inputs[0]) {
        const AVPixFmtDescriptor *desc = NULL;
        formats = NULL;
        while ((desc = av_pix_fmt_desc_next(desc))) {
            pix_fmt = av_pix_fmt_desc_get_id(desc);
            if ((sws_isSupportedInput(pix_fmt) ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                return ret;
            }
        }
        if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->out_formats)) < 0)
            return ret;
    }
    if (ctx->outputs[0]) {
        const AVPixFmtDescriptor *desc = NULL;
        formats = NULL;
        while ((desc = av_pix_fmt_desc_next(desc))) {
            pix_fmt = av_pix_fmt_desc_get_id(desc);
            if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                return ret;
            }
        }
        if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0)
            return ret;
    }

    return 0;
}
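
/* Map a color-matrix name ("bt709", "bt601", ...) to the corresponding
 * swscale coefficient table, falling back to BT.601/BT.470BG. */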
static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
{
    if (!s)
        s = "bt601";

    if (s && strstr(s, "bt709")) {
        colorspace = AVCOL_SPC_BT709;
    } else if (s && strstr(s, "fcc")) {
        colorspace = AVCOL_SPC_FCC;
    } else if (s && strstr(s, "smpte240m")) {
        colorspace = AVCOL_SPC_SMPTE240M;
    } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
        colorspace = AVCOL_SPC_BT470BG;
    } else if (s && strstr(s, "bt2020")) {
        colorspace = AVCOL_SPC_BT2020_NCL;
    }

    if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
        colorspace = AVCOL_SPC_BT470BG;
    }

    return sws_getCoefficients(colorspace);
}
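
/* Evaluate the width and height expressions into scale->w/scale->h. For
 * scale2ref, the evaluation input is the "ref" pad (inputs[1]) and the
 * main_* variables describe the first input. */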
static int scale_eval_dimensions(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    const char scale2ref = ctx->filter == &ff_vf_scale2ref;
    const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0];
    const AVFilterLink *outlink = ctx->outputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
    char *expr;
    int eval_w, eval_h;
    int ret;
    double res;
    const AVPixFmtDescriptor *main_desc;
    const AVFilterLink *main_link;

    if (scale2ref) {
        main_link = ctx->inputs[0];
        main_desc = av_pix_fmt_desc_get(main_link->format);
    }

    scale->var_values[VAR_IN_W]  = scale->var_values[VAR_IW] = inlink->w;
    scale->var_values[VAR_IN_H]  = scale->var_values[VAR_IH] = inlink->h;
    scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
    scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
    scale->var_values[VAR_A]     = (double) inlink->w / inlink->h;
    scale->var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    scale->var_values[VAR_DAR]   = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
    scale->var_values[VAR_HSUB]  = 1 << desc->log2_chroma_w;
    scale->var_values[VAR_VSUB]  = 1 << desc->log2_chroma_h;
    scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
    scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;

    if (scale2ref) {
        scale->var_values[VAR_S2R_MAIN_W]   = main_link->w;
        scale->var_values[VAR_S2R_MAIN_H]   = main_link->h;
        scale->var_values[VAR_S2R_MAIN_A]   = (double) main_link->w / main_link->h;
        scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
            (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
        scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
            scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
        scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w;
        scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h;
    }
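
    /* Evaluate the width first (so the height expression may reference
     * out_w/ow), then the height, then the width once more so that it may
     * in turn reference out_h/oh. */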
    res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
    eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;

    res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
    if (isnan(res)) {
        expr = scale->h_expr;
        ret = AVERROR(EINVAL);
        goto fail;
    }
    eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res;

    res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
    if (isnan(res)) {
        expr = scale->w_expr;
        ret = AVERROR(EINVAL);
        goto fail;
    }
    eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res;

    scale->w = eval_w;
    scale->h = eval_h;

    return 0;

fail:
    av_log(ctx, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'.\n", expr);
    return ret;
}
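
/* Configure the output link: finalize the output dimensions and (re)build
 * the swscale context(s) used for the conversion. */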
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = outlink->src->inputs[0];
    AVFilterLink *inlink  = ctx->filter == &ff_vf_scale2ref ?
                            outlink->src->inputs[1] :
                            outlink->src->inputs[0];
    enum AVPixelFormat outfmt = outlink->format;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    ScaleContext *scale = ctx->priv;
    int ret;

    if ((ret = scale_eval_dimensions(ctx)) < 0)
        goto fail;

    ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h,
                               scale->force_original_aspect_ratio,
                               scale->force_divisible_by);

    if (scale->w > INT_MAX ||
        scale->h > INT_MAX ||
        (scale->h * inlink->w) > INT_MAX ||
        (scale->w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = scale->w;
    outlink->h = scale->h;

    /* TODO: make algorithm configurable */

    scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL;
    if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
    scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
                           av_pix_fmt_desc_get(outfmt)->flags & FF_PSEUDOPAL;

    if (scale->sws)
        sws_freeContext(scale->sws);
    if (scale->isws[0])
        sws_freeContext(scale->isws[0]);
    if (scale->isws[1])
        sws_freeContext(scale->isws[1]);
    scale->isws[0] = scale->isws[1] = scale->sws = NULL;
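
    /* When input and output match in size, format and range, no swscale
     * context is needed and frames pass through untouched. Otherwise
     * allocate one context, or three when scaling interlaced material
     * (whole frame plus one per field). */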
    if (inlink0->w == outlink->w &&
        inlink0->h == outlink->h &&
        !scale->out_color_matrix &&
        scale->in_range == scale->out_range &&
        inlink0->format == outlink->format)
        ;
    else {
        struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
        int i;

        for (i = 0; i < 3; i++) {
            int in_v_chr_pos = scale->in_v_chr_pos, out_v_chr_pos = scale->out_v_chr_pos;
            struct SwsContext **s = swscs[i];
            *s = sws_alloc_context();
            if (!*s)
                return AVERROR(ENOMEM);

            av_opt_set_int(*s, "srcw", inlink0->w, 0);
            av_opt_set_int(*s, "srch", inlink0->h >> !!i, 0);
            av_opt_set_int(*s, "src_format", inlink0->format, 0);
            av_opt_set_int(*s, "dstw", outlink->w, 0);
            av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
            av_opt_set_int(*s, "dst_format", outfmt, 0);
            av_opt_set_int(*s, "sws_flags", scale->flags, 0);
            av_opt_set_int(*s, "param0", scale->param[0], 0);
            av_opt_set_int(*s, "param1", scale->param[1], 0);
            if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
                av_opt_set_int(*s, "src_range",
                               scale->in_range == AVCOL_RANGE_JPEG, 0);
            if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
                av_opt_set_int(*s, "dst_range",
                               scale->out_range == AVCOL_RANGE_JPEG, 0);

            if (scale->opts) {
                AVDictionaryEntry *e = NULL;
                while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
                    if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
                        return ret;
                }
            }
            /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
             * MPEG-2 chroma positions are used by convention
             * XXX: support other 4:2:0 pixel formats */
            if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
                in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
            }

            if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
                out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
            }

            av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
            av_opt_set_int(*s, "src_v_chr_pos", in_v_chr_pos, 0);
            av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
            av_opt_set_int(*s, "dst_v_chr_pos", out_v_chr_pos, 0);

            if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
                return ret;
            if (!scale->interlaced)
                break;
        }
    }

    if (inlink0->sample_aspect_ratio.num){
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink0->w, outlink->w * inlink0->h}, inlink0->sample_aspect_ratio);
    } else
        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
           inlink->w, inlink->h, av_get_pix_fmt_name(inlink->format),
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
           scale->flags);
    return 0;

fail:
    return ret;
}
static int config_props_ref(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[1];

    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;
    outlink->frame_rate = inlink->frame_rate;

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[0]);
}

static int request_frame_ref(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[1]);
}
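
/* Scale one horizontal band of the picture. 'mul' and 'field' implement
 * interlaced scaling by doubling the strides and offsetting to the
 * requested field; palettes are passed through unscaled. */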
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
{
    ScaleContext *scale = link->dst->priv;
    const uint8_t *in[4];
    uint8_t *out[4];
    int in_stride[4], out_stride[4];
    int i;

    for (i = 0; i < 4; i++) {
        int vsub = ((i+1)&2) ? scale->vsub : 0;
        in_stride[i]  = cur_pic->linesize[i] * mul;
        out_stride[i] = out_buf->linesize[i] * mul;
        in[i]  = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
        out[i] = out_buf->data[i] + field * out_buf->linesize[i];
    }
    if (scale->input_is_pal)
        in[1]  = cur_pic->data[1];
    if (scale->output_is_pal)
        out[1] = out_buf->data[1];

    return sws_scale(sws, in, in_stride, y/mul, h,
                     out, out_stride);
}
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
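
/* Scale one input frame into a newly allocated output frame, re-evaluating
 * the dimension expressions first when running in per-frame eval mode or
 * when the input properties changed mid-stream. */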
static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
{
    AVFilterContext *ctx = link->dst;
    ScaleContext *scale = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    char buf[32];
    int in_range;
    int frame_changed;

    *frame_out = NULL;
    if (in->colorspace == AVCOL_SPC_YCGCO)
        av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");

    frame_changed = in->width  != link->w ||
                    in->height != link->h ||
                    in->format != link->format ||
                    in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
                    in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;

    if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) {
        int ret;
        unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };

        av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
        av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);

        if (scale->eval_mode == EVAL_MODE_FRAME &&
            !frame_changed &&
            ctx->filter != &ff_vf_scale2ref &&
            !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) &&
            !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) &&
            scale->w && scale->h)
            goto scale;

        if (scale->eval_mode == EVAL_MODE_INIT) {
            snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
            av_opt_set(scale, "w", buf, 0);
            snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
            av_opt_set(scale, "h", buf, 0);

            ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
            if (ret < 0)
                return ret;

            ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
            if (ret < 0)
                return ret;
        }

        if (ctx->filter == &ff_vf_scale2ref) {
            scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out;
            scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
            scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
        } else {
            scale->var_values[VAR_N] = link->frame_count_out;
            scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
            scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
        }

        link->dst->inputs[0]->format = in->format;
        link->dst->inputs[0]->w      = in->width;
        link->dst->inputs[0]->h      = in->height;

        link->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
        link->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;

        if ((ret = config_props(outlink)) < 0)
            return ret;
    }
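
/* No swscale context means the frame can be passed through unchanged. */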
scale:
    if (!scale->sws) {
        *frame_out = in;
        return 0;
    }

    scale->hsub = desc->log2_chroma_w;
    scale->vsub = desc->log2_chroma_h;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    *frame_out = out;

    av_frame_copy_props(out, in);
    out->width  = outlink->w;
    out->height = outlink->h;

    if (scale->output_is_pal)
        avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);

    in_range = in->color_range;

    if (   scale->in_color_matrix
        || scale->out_color_matrix
        || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
        || in_range != AVCOL_RANGE_UNSPECIFIED
        || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
        int in_full, out_full, brightness, contrast, saturation;
        const int *inv_table, *table;

        sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
                                 (int **)&table, &out_full,
                                 &brightness, &contrast, &saturation);

        if (scale->in_color_matrix)
            inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
        if (scale->out_color_matrix)
            table     = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
        else if (scale->in_color_matrix)
            table = inv_table;

        if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (scale-> in_range == AVCOL_RANGE_JPEG);
        else if (in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (in_range == AVCOL_RANGE_JPEG);
        if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
            out_full = (scale->out_range == AVCOL_RANGE_JPEG);

        sws_setColorspaceDetails(scale->sws, inv_table, in_full,
                                 table, out_full,
                                 brightness, contrast, saturation);
        if (scale->isws[0])
            sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);
        if (scale->isws[1])
            sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);

        out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    }

    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    if (scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)) {
        scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
        scale_slice(link, out, in, scale->isws[1], 0,  link->h   /2, 2, 1);
    } else if (scale->nb_slices) {
        int i, slice_h, slice_start, slice_end = 0;
        const int nb_slices = FFMIN(scale->nb_slices, link->h);
        for (i = 0; i < nb_slices; i++) {
            slice_start = slice_end;
            slice_end   = (link->h * (i+1)) / nb_slices;
            slice_h     = slice_end - slice_start;
            scale_slice(link, out, in, scale->sws, slice_start, slice_h, 1, 0);
        }
    } else {
        scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
    }

    av_frame_free(&in);
    return 0;
}
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int ret;

    ret = scale_frame(link, in, &out);
    if (out)
        return ff_filter_frame(outlink, out);

    return ret;
}

static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
{
    ScaleContext *scale = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[1];
    int frame_changed;

    frame_changed = in->width  != link->w ||
                    in->height != link->h ||
                    in->format != link->format ||
                    in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
                    in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;

    if (frame_changed) {
        link->format = in->format;
        link->w = in->width;
        link->h = in->height;
        link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
        link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;

        config_props_ref(outlink);
    }

    if (scale->eval_mode == EVAL_MODE_FRAME) {
        scale->var_values[VAR_N] = link->frame_count_out;
        scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
        scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
    }

    return ff_filter_frame(outlink, in);
}
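
/* Runtime commands: "width"/"w" and "height"/"h" replace the corresponding
 * dimension expression; anything else is unsupported. */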
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    ScaleContext *scale = ctx->priv;
    char *str_expr;
    AVExpr **pexpr_ptr;
    int ret, w, h;

    w = !strcmp(cmd, "width")  || !strcmp(cmd, "w");
    h = !strcmp(cmd, "height") || !strcmp(cmd, "h");

    if (w || h) {
        str_expr = w ? scale->w_expr : scale->h_expr;
        pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr;

        ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args);
    } else
        ret = AVERROR(ENOSYS);

    if (ret < 0)
        av_log(ctx, AV_LOG_ERROR, "Failed to process command. Continuing with existing parameters.\n");

    return ret;
}

static const AVClass *child_class_next(const AVClass *prev)
{
    return prev ? NULL : sws_get_class();
}
#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption scale_options[] = {
    { "w",      "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "width",  "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "h",      "Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "height", "Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING, .flags = TFLAGS },
    { "flags",  "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
    { "interl", "set interlacing",             OFFSET(interlaced), AV_OPT_TYPE_BOOL,  {.i64 = 0 }, -1, 1, FLAGS },
    { "size",   "set video size",              OFFSET(size_str),  AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    { "s",      "set video size",              OFFSET(size_str),  AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    {  "in_color_matrix", "set input YCbCr type",  OFFSET(in_color_matrix),  AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS, "color" },
    { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL },   .flags = FLAGS, "color" },
    { "auto",      NULL, 0, AV_OPT_TYPE_CONST, { .str = "auto" },      0, 0, FLAGS, "color" },
    { "bt601",     NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt601" },     0, 0, FLAGS, "color" },
    { "bt470",     NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt470" },     0, 0, FLAGS, "color" },
    { "smpte170m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte170m" }, 0, 0, FLAGS, "color" },
    { "bt709",     NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt709" },     0, 0, FLAGS, "color" },
    { "fcc",       NULL, 0, AV_OPT_TYPE_CONST, { .str = "fcc" },       0, 0, FLAGS, "color" },
    { "smpte240m", NULL, 0, AV_OPT_TYPE_CONST, { .str = "smpte240m" }, 0, 0, FLAGS, "color" },
    { "bt2020",    NULL, 0, AV_OPT_TYPE_CONST, { .str = "bt2020" },    0, 0, FLAGS, "color" },
    {  "in_range", "set input color range",  OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "auto",    NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "full",    NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" },
    { "limited", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" },
    { "jpeg",    NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" },
    { "mpeg",    NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" },
    { "tv",      NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" },
    { "pc",      NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" },
    { "in_v_chr_pos",  "input vertical chroma position in luma grid/256",    OFFSET(in_v_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "in_h_chr_pos",  "input horizontal chroma position in luma grid/256",  OFFSET(in_h_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_v_chr_pos", "output vertical chroma position in luma grid/256",   OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
    { "disable",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
    { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
    { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "nb_slices", "set the number of slices (debug purpose only)", OFFSET(nb_slices), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
         { "init",  "eval expressions once during initialization",          0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { NULL }
};
static const AVClass scale_class = {
    .class_name       = "scale",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_next = child_class_next,
};

static const AVFilterPad avfilter_vf_scale_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_scale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};

AVFilter ff_vf_scale = {
    .name            = "scale",
    .description     = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
    .init_dict       = init_dict,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .priv_size       = sizeof(ScaleContext),
    .priv_class      = &scale_class,
    .inputs          = avfilter_vf_scale_inputs,
    .outputs         = avfilter_vf_scale_outputs,
    .process_command = process_command,
};
static const AVClass scale2ref_class = {
    .class_name       = "scale2ref",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_next = child_class_next,
};

static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    {
        .name         = "ref",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_ref,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props,
        .request_frame = request_frame,
    },
    {
        .name          = "ref",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props_ref,
        .request_frame = request_frame_ref,
    },
    { NULL }
};

AVFilter ff_vf_scale2ref = {
    .name            = "scale2ref",
    .description     = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
    .init_dict       = init_dict,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .priv_size       = sizeof(ScaleContext),
    .priv_class      = &scale2ref_class,
    .inputs          = avfilter_vf_scale2ref_inputs,
    .outputs         = avfilter_vf_scale2ref_outputs,
    .process_command = process_command,
};