/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter
 */

#include <stdio.h>
#include <string.h>

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "libswscale/swscale.h"
static const char *const var_names[] = {
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    "ohsub",
    "ovsub",
    NULL
};

enum var_name {
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VAR_OHSUB,
    VAR_OVSUB,
    VARS_NB
};
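
/*
 * These names are bound to concrete values in config_props() before the
 * "w" and "h" option strings are evaluated, so the expressions can refer
 * to the input geometry. For example, from the ffmpeg command line:
 *
 *   scale=w=iw/2:h=-1    half the input width; height -1 is then resolved
 *                        so that the original aspect ratio is kept
 */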
typedef struct {
    const AVClass *class;
    struct SwsContext *sws;     ///< software scaler context
    struct SwsContext *isws[2]; ///< software scaler context for interlaced material
    AVDictionary *opts;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     */
    int w, h;
    char *size_str;
    unsigned int flags;         ///< sws flags

    int hsub, vsub;             ///< chroma subsampling
    int slice_y;                ///< top of current output slice
    int input_is_pal;           ///< set to 1 if the input format is paletted
    int output_is_pal;          ///< set to 1 if the output format is paletted
    int interlaced;

    char *w_expr;               ///< width expression string
    char *h_expr;               ///< height expression string
    char *flags_str;

    char *in_color_matrix;
    char *out_color_matrix;

    int in_range;
    int out_range;

    int out_h_chr_pos;
    int out_v_chr_pos;
    int in_h_chr_pos;
    int in_v_chr_pos;

    int force_original_aspect_ratio;
} ScaleContext;
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    if (scale->size_str && (scale->w_expr || scale->h_expr)) {
        av_log(ctx, AV_LOG_ERROR,
               "Size and width/height expressions cannot be set at the same time.\n");
        return AVERROR(EINVAL);
    }
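
    /* If only "w" is set, the user most likely passed a single positional
     * argument; treat it as a size string (e.g. "640x480" or "hd720") by
     * moving it into size_str, where av_parse_video_size() handles it. */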
    if (scale->w_expr && !scale->h_expr)
        FFSWAP(char *, scale->w_expr, scale->size_str);

    if (scale->size_str) {
        char buf[32];
        if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid size '%s'\n", scale->size_str);
            return ret;
        }
        snprintf(buf, sizeof(buf)-1, "%d", scale->w);
        av_opt_set(scale, "w", buf, 0);
        snprintf(buf, sizeof(buf)-1, "%d", scale->h);
        av_opt_set(scale, "h", buf, 0);
    }
    if (!scale->w_expr)
        av_opt_set(scale, "w", "iw", 0);
    if (!scale->h_expr)
        av_opt_set(scale, "h", "ih", 0);

    av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
           scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);

    scale->flags = 0;
    if (scale->flags_str) {
        const AVClass *class = sws_get_class();
        const AVOption    *o = av_opt_find(&class, "sws_flags", NULL, 0,
                                           AV_OPT_SEARCH_FAKE_OBJ);
        int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
        if (ret < 0)
            return ret;
    }
    scale->opts = *opts;
    *opts = NULL;

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    sws_freeContext(scale->sws);
    sws_freeContext(scale->isws[0]);
    sws_freeContext(scale->isws[1]);
    scale->sws = NULL;
    av_dict_free(&scale->opts);
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    enum AVPixelFormat pix_fmt;
    int ret;

    if (ctx->inputs[0]) {
        formats = NULL;
        for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
            if ((sws_isSupportedInput(pix_fmt) ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                ff_formats_unref(&formats);
                return ret;
            }
        ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
    }
    if (ctx->outputs[0]) {
        formats = NULL;
        for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
            if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                ff_formats_unref(&formats);
                return ret;
            }
        ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
    }

    return 0;
}
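
/*
 * Map a color matrix name from the in/out_color_matrix options onto the
 * coefficient table expected by sws_setColorspaceDetails(). A missing or
 * unrecognized name falls back to the BT.601 (AVCOL_SPC_BT470BG)
 * coefficients.
 */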
static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
{
    if (!s)
        s = "bt601";

    if (strstr(s, "bt709")) {
        colorspace = AVCOL_SPC_BT709;
    } else if (strstr(s, "fcc")) {
        colorspace = AVCOL_SPC_FCC;
    } else if (strstr(s, "smpte240m")) {
        colorspace = AVCOL_SPC_SMPTE240M;
    } else if (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m")) {
        colorspace = AVCOL_SPC_BT470BG;
    }

    if (colorspace < 1 || colorspace > 7) {
        colorspace = AVCOL_SPC_BT470BG;
    }

    return sws_getCoefficients(colorspace);
}
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    enum AVPixelFormat outfmt = outlink->format;
    ScaleContext *scale = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
    int64_t w, h;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (double) inlink->w / inlink->h;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];
    var_values[VAR_HSUB]  = 1 << desc->log2_chroma_w;
    var_values[VAR_VSUB]  = 1 << desc->log2_chroma_h;
    var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
    var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;

    /* evaluate width and height; errors from the first pass are deliberately
     * ignored, since the width may reference the not-yet-computed output
     * height, and the expression is re-evaluated (and checked) below */
    av_expr_parse_and_eval(&res, (expr = scale->w_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    scale->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    scale->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
    /* evaluate again the width, as it may depend on the output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    scale->w = res;
    w = scale->w;
    h = scale->h;

    /* sanity check params */
    if (w < -1 || h < -1) {
        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
        return AVERROR(EINVAL);
    }
    if (w == -1 && h == -1)
        scale->w = scale->h = 0;

    if (!(w = scale->w))
        w = inlink->w;
    if (!(h = scale->h))
        h = inlink->h;
    if (w == -1)
        w = av_rescale(h, inlink->w, inlink->h);
    if (h == -1)
        h = av_rescale(w, inlink->h, inlink->w);
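
    /* force_original_aspect_ratio: 1 ("decrease") keeps the output within
     * the requested size, 2 ("increase") keeps it at least that size, in
     * both cases adjusting one dimension so the input aspect is preserved */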
    if (scale->force_original_aspect_ratio) {
        int tmp_w = av_rescale(h, inlink->w, inlink->h);
        int tmp_h = av_rescale(w, inlink->h, inlink->w);

        if (scale->force_original_aspect_ratio == 1) {
            w = FFMIN(tmp_w, w);
            h = FFMIN(tmp_h, h);
        } else {
            w = FFMAX(tmp_w, w);
            h = FFMAX(tmp_h, h);
        }
    }

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX  ||
        (w * inlink->h) > INT_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
        return AVERROR(EINVAL);
    }

    outlink->w = w;
    outlink->h = h;
    /* TODO: make algorithm configurable */

    scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL ||
                          desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
    if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
    scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
                           av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;

    if (scale->sws)
        sws_freeContext(scale->sws);
    if (scale->isws[0])
        sws_freeContext(scale->isws[0]);
    if (scale->isws[1])
        sws_freeContext(scale->isws[1]);
    scale->isws[0] = scale->isws[1] = scale->sws = NULL;
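
    /* Up to three scaler contexts are set up: sws for progressive scaling,
     * isws[0]/isws[1] for the top and bottom fields when interlaced scaling
     * is requested. The field contexts operate on half the frame height,
     * hence the "h >> !!i" below. */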
    if (inlink->w != outlink->w || inlink->h != outlink->h ||
        inlink->format != outlink->format) {
        struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
        int i;

        for (i = 0; i < 3; i++) {
            struct SwsContext **s = swscs[i];
            *s = sws_alloc_context();
            if (!*s)
                return AVERROR(ENOMEM);

            if (scale->opts) {
                AVDictionaryEntry *e = NULL;

                while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
                    if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
                        return ret;
                }
            }

            av_opt_set_int(*s, "srcw", inlink ->w, 0);
            av_opt_set_int(*s, "srch", inlink ->h >> !!i, 0);
            av_opt_set_int(*s, "src_format", inlink->format, 0);
            av_opt_set_int(*s, "dstw", outlink->w, 0);
            av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
            av_opt_set_int(*s, "dst_format", outfmt, 0);
            av_opt_set_int(*s, "sws_flags", scale->flags, 0);

            av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
            av_opt_set_int(*s, "src_v_chr_pos", scale->in_v_chr_pos, 0);
            av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
            av_opt_set_int(*s, "dst_v_chr_pos", scale->out_v_chr_pos, 0);

            if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
                return ret;
            if (!scale->interlaced)
                break;
        }
    }

    if (inlink->sample_aspect_ratio.num){
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
    } else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
           inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
           scale->flags);
    return 0;

fail:
    av_log(NULL, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'.\n"
           "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
           expr, scale->w_expr, scale->h_expr);
    return ret;
}
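
/*
 * Scale one slice of the frame. For progressive content mul is 1 and field
 * is 0, so the whole picture is handled in a single call. For field-based
 * scaling mul is 2 and field selects the top (0) or bottom (1) field: the
 * line strides are doubled so each sws_scale() call sees every other line.
 */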
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
{
    ScaleContext *scale = link->dst->priv;
    const uint8_t *in[4];
    uint8_t *out[4];
    int in_stride[4], out_stride[4];
    int i;

    for (i = 0; i < 4; i++) {
        int vsub = ((i+1)&2) ? scale->vsub : 0; /* planes 1 and 2 are chroma */
         in_stride[i] = cur_pic->linesize[i] * mul;
        out_stride[i] = out_buf->linesize[i] * mul;
         in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
        out[i] = out_buf->data[i] +            field  * out_buf->linesize[i];
    }
    /* the second plane of paletted formats holds the palette, not pixels */
    if (scale->input_is_pal)
         in[1] = cur_pic->data[1];
    if (scale->output_is_pal)
        out[1] = out_buf->data[1];

    return sws_scale(sws, in, in_stride, y/mul, h,
                     out, out_stride);
}
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    ScaleContext *scale = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFrame *out;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    char buf[32];
    int in_range;
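
    /* Frame properties can change mid-stream; if the incoming frame no
     * longer matches what was negotiated on the link, update the link and
     * reconfigure the scaler. */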
    if (   in->width  != link->w
        || in->height != link->h
        || in->format != link->format) {
        int ret;
        snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
        av_opt_set(scale, "w", buf, 0);
        snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
        av_opt_set(scale, "h", buf, 0);

        link->dst->inputs[0]->format = in->format;
        link->dst->inputs[0]->w      = in->width;
        link->dst->inputs[0]->h      = in->height;

        if ((ret = config_props(outlink)) < 0)
            return ret;
    }

    if (!scale->sws)
        return ff_filter_frame(outlink, in);

    scale->hsub = desc->log2_chroma_w;
    scale->vsub = desc->log2_chroma_h;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);
    out->width  = outlink->w;
    out->height = outlink->h;

    if (scale->output_is_pal)
        avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
    in_range = av_frame_get_color_range(in);

    if (   scale->in_color_matrix
        || scale->out_color_matrix
        || scale-> in_range != AVCOL_RANGE_UNSPECIFIED
        || in_range != AVCOL_RANGE_UNSPECIFIED
        || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
        int in_full, out_full, brightness, contrast, saturation;
        const int *inv_table, *table;

        sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
                                 (int **)&table, &out_full,
                                 &brightness, &contrast, &saturation);

        if (scale->in_color_matrix)
            inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
        if (scale->out_color_matrix)
            table     = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);

        if (scale-> in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (scale-> in_range == AVCOL_RANGE_JPEG);
        else if (in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (in_range == AVCOL_RANGE_JPEG);
        if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
            out_full = (scale->out_range == AVCOL_RANGE_JPEG);

        sws_setColorspaceDetails(scale->sws, inv_table, in_full,
                                 table, out_full,
                                 brightness, contrast, saturation);
        if (scale->isws[0])
            sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);
        if (scale->isws[1])
            sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);
    }
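
    /* Rescale the sample aspect ratio so the display aspect ratio stays
     * constant: out_sar = in_sar * (in_w * out_h) / (in_h * out_w). */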
    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    /* interl=1 forces field-based scaling; interl=-1 applies it only to
     * frames flagged as interlaced */
    if (scale->interlaced > 0 || (scale->interlaced < 0 && in->interlaced_frame)) {
        scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
        scale_slice(link, out, in, scale->isws[1], 0,  link->h   /2, 2, 1);
    } else {
        scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
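
/*
 * Expose libswscale's AVClass as a child of this filter's class, so that
 * swscale's own AVOptions are discoverable under the scale filter.
 */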
static const AVClass *child_class_next(const AVClass *prev)
{
    return prev ? NULL : sws_get_class();
}
#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption scale_options[] = {
    { "w",     "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "width", "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "h",     "Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "height","Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
    { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 1, FLAGS },
    { "size",   "set video size",          OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    { "s",      "set video size",          OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    {  "in_color_matrix", "set input YCbCr type",   OFFSET(in_color_matrix),  AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
    { "out_color_matrix", "set output YCbCr type",  OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL },   .flags = FLAGS },
    {  "in_range", "set input color range",  OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "auto",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "full",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "jpeg",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "mpeg",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "tv",     NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "pc",     NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "in_v_chr_pos",   "input vertical chroma position in luma grid/256",    OFFSET(in_v_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
    { "in_h_chr_pos",   "input horizontal chroma position in luma grid/256",  OFFSET(in_h_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
    { "out_v_chr_pos",  "output vertical chroma position in luma grid/256",   OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
    { "out_h_chr_pos",  "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 512, FLAGS },
    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
    { "disable",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
    { NULL }
};
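
/*
 * Typical invocations from the ffmpeg command line, for reference:
 *
 *   -vf scale=640:480               fixed output size
 *   -vf scale=iw/2:-1               half width, height keeps the input aspect
 *   -vf scale=hd720                 size abbreviation (see av_parse_video_size())
 *   -vf scale=640:480:flags=lanczos
 */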
static const AVClass scale_class = {
    .class_name       = "scale",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_class_next = child_class_next,
};

static const AVFilterPad avfilter_vf_scale_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_scale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};

AVFilter ff_vf_scale = {
    .name          = "scale",
    .description   = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
    .init_dict     = init_dict,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ScaleContext),
    .priv_class    = &scale_class,
    .inputs        = avfilter_vf_scale_inputs,
    .outputs       = avfilter_vf_scale_outputs,
};