/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter
 */

#include <stdio.h>
#include <string.h>

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "scale.h"
#include "video.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "libswscale/swscale.h"

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

typedef struct ScaleContext {
    const AVClass *class;
    struct SwsContext *sws;     ///< software scaler context
    struct SwsContext *isws[2]; ///< software scaler context for interlaced material
    AVDictionary *opts;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     *  -N = try to keep aspect but make sure it is divisible by N
     */
    int w, h;
    char *size_str;
    unsigned int flags;         ///< sws flags
    double param[2];            ///< sws params

    int hsub, vsub;             ///< chroma subsampling
    int slice_y;                ///< top of current output slice
    int input_is_pal;           ///< set to 1 if the input format is paletted
    int output_is_pal;          ///< set to 1 if the output format is paletted
    int interlaced;

    char *w_expr;               ///< width expression string
    char *h_expr;               ///< height expression string
    char *flags_str;

    char *in_color_matrix;
    char *out_color_matrix;

    int in_range;
    int out_range;

    int out_h_chr_pos;
    int out_v_chr_pos;
    int in_h_chr_pos;
    int in_v_chr_pos;

    int force_original_aspect_ratio;

    int nb_slices;

    int eval_mode;              ///< expression evaluation mode
} ScaleContext;

/* Forward declaration: config_props() compares ctx->filter against this to
 * tell the scale2ref filter apart from plain scale. */
AVFilter ff_vf_scale2ref;
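
/*
 * Option pre-processing at init time (summary of the code below): reject
 * "size" combined with explicit w/h expressions, treat a lone "w" value as a
 * size string, expand a parsed WxH size back into the "w"/"h" options,
 * default missing expressions to the input dimensions ("iw"/"ih"), and
 * translate the flags string into libswscale flag bits. The leftover options
 * dictionary is stashed in ScaleContext.opts and forwarded to the SwsContext
 * in config_props().
 */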
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    if (scale->size_str && (scale->w_expr || scale->h_expr)) {
        av_log(ctx, AV_LOG_ERROR,
               "Size and width/height expressions cannot be set at the same time.\n");
        return AVERROR(EINVAL);
    }

    if (scale->w_expr && !scale->h_expr)
        FFSWAP(char *, scale->w_expr, scale->size_str);

    if (scale->size_str) {
        char buf[32];
        if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid size '%s'\n", scale->size_str);
            return ret;
        }
        snprintf(buf, sizeof(buf)-1, "%d", scale->w);
        av_opt_set(scale, "w", buf, 0);
        snprintf(buf, sizeof(buf)-1, "%d", scale->h);
        av_opt_set(scale, "h", buf, 0);
    }
    if (!scale->w_expr)
        av_opt_set(scale, "w", "iw", 0);
    if (!scale->h_expr)
        av_opt_set(scale, "h", "ih", 0);

    av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
           scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);

    scale->flags = 0;

    if (scale->flags_str) {
        const AVClass *class = sws_get_class();
        const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
                                        AV_OPT_SEARCH_FAKE_OBJ);
        int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
        if (ret < 0)
            return ret;
    }
    scale->opts = *opts;
    *opts = NULL;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    sws_freeContext(scale->sws);
    sws_freeContext(scale->isws[0]);
    sws_freeContext(scale->isws[1]);
    scale->sws = NULL;
    av_dict_free(&scale->opts);
}
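
/*
 * Advertise every pixel format libswscale can read on the input pad and
 * every format it can write (plus PAL8, which is emulated through a
 * systematic palette) on the output pad, so format negotiation lets this
 * filter do pure format conversion as well as resizing.
 */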
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    enum AVPixelFormat pix_fmt;
    int ret;

    if (ctx->inputs[0]) {
        const AVPixFmtDescriptor *desc = NULL;
        formats = NULL;
        while ((desc = av_pix_fmt_desc_next(desc))) {
            pix_fmt = av_pix_fmt_desc_get_id(desc);
            if ((sws_isSupportedInput(pix_fmt) ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                return ret;
            }
        }
        if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->out_formats)) < 0)
            return ret;
    }
    if (ctx->outputs[0]) {
        const AVPixFmtDescriptor *desc = NULL;
        formats = NULL;
        while ((desc = av_pix_fmt_desc_next(desc))) {
            pix_fmt = av_pix_fmt_desc_get_id(desc);
            if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                return ret;
            }
        }
        if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0)
            return ret;
    }

    return 0;
}
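
/*
 * Map a color-matrix option string onto the corresponding libswscale
 * coefficient table. Unknown or unsupported values fall back to the
 * BT.601 (AVCOL_SPC_BT470BG) coefficients.
 */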
static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
{
    if (!s)
        s = "bt601";

    if (s && strstr(s, "bt709")) {
        colorspace = AVCOL_SPC_BT709;
    } else if (s && strstr(s, "fcc")) {
        colorspace = AVCOL_SPC_FCC;
    } else if (s && strstr(s, "smpte240m")) {
        colorspace = AVCOL_SPC_SMPTE240M;
    } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
        colorspace = AVCOL_SPC_BT470BG;
    } else if (s && strstr(s, "bt2020")) {
        colorspace = AVCOL_SPC_BT2020_NCL;
    }

    if (colorspace < 1 || colorspace > 10 || colorspace == 8) {
        colorspace = AVCOL_SPC_BT470BG;
    }

    return sws_getCoefficients(colorspace);
}
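
/*
 * (Re)configure the output link: evaluate the width/height expressions,
 * optionally clamp them to preserve the original aspect ratio, and build up
 * to three SwsContexts (one progressive, two for the fields of interlaced
 * input) unless the conversion turns out to be a no-op. Called both by the
 * framework and from filter_frame() when input properties change mid-stream.
 */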
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = outlink->src->inputs[0];
    AVFilterLink *inlink  = ctx->filter == &ff_vf_scale2ref ?
                            outlink->src->inputs[1] :
                            outlink->src->inputs[0];
    enum AVPixelFormat outfmt = outlink->format;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    ScaleContext *scale = ctx->priv;
    int w, h;
    int ret;

    if ((ret = ff_scale_eval_dimensions(ctx,
                                        scale->w_expr, scale->h_expr,
                                        inlink, outlink,
                                        &w, &h)) < 0)
        goto fail;

    /* Note that force_original_aspect_ratio may overwrite the previous set
     * dimensions so that it is not divisible by the set factors anymore. */
    if (scale->force_original_aspect_ratio) {
        int tmp_w = av_rescale(h, inlink->w, inlink->h);
        int tmp_h = av_rescale(w, inlink->h, inlink->w);

        if (scale->force_original_aspect_ratio == 1) {
            w = FFMIN(tmp_w, w);
            h = FFMIN(tmp_h, h);
        } else {
            w = FFMAX(tmp_w, w);
            h = FFMAX(tmp_h, h);
        }
    }

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX ||
        (w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    /* TODO: make algorithm configurable */

    scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL ||
                          desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
    if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
    scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
                           av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;

    if (scale->sws)
        sws_freeContext(scale->sws);
    if (scale->isws[0])
        sws_freeContext(scale->isws[0]);
    if (scale->isws[1])
        sws_freeContext(scale->isws[1]);
    scale->isws[0] = scale->isws[1] = scale->sws = NULL;
    if (inlink0->w == outlink->w &&
        inlink0->h == outlink->h &&
        !scale->out_color_matrix &&
        scale->in_range == scale->out_range &&
        inlink0->format == outlink->format)
        ;
    else {
        struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
        int i;

        for (i = 0; i < 3; i++) {
            struct SwsContext **s = swscs[i];
            *s = sws_alloc_context();
            if (!*s)
                return AVERROR(ENOMEM);

            av_opt_set_int(*s, "srcw", inlink0->w, 0);
            av_opt_set_int(*s, "srch", inlink0->h >> !!i, 0);
            av_opt_set_int(*s, "src_format", inlink0->format, 0);
            av_opt_set_int(*s, "dstw", outlink->w, 0);
            av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
            av_opt_set_int(*s, "dst_format", outfmt, 0);
            av_opt_set_int(*s, "sws_flags", scale->flags, 0);
            av_opt_set_int(*s, "param0", scale->param[0], 0);
            av_opt_set_int(*s, "param1", scale->param[1], 0);
            if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
                av_opt_set_int(*s, "src_range",
                               scale->in_range == AVCOL_RANGE_JPEG, 0);
            if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
                av_opt_set_int(*s, "dst_range",
                               scale->out_range == AVCOL_RANGE_JPEG, 0);

            if (scale->opts) {
                AVDictionaryEntry *e = NULL;
                while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
                    if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
                        return ret;
                }
            }
            /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions
             * MPEG-2 chroma positions are used by convention
             * XXX: support other 4:2:0 pixel formats */
            if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
                scale->in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
            }

            if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
                scale->out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
            }

            av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
            av_opt_set_int(*s, "src_v_chr_pos", scale->in_v_chr_pos, 0);
            av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
            av_opt_set_int(*s, "dst_v_chr_pos", scale->out_v_chr_pos, 0);

            if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
                return ret;
            if (!scale->interlaced)
                break;
        }
    }

    if (inlink->sample_aspect_ratio.num) {
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
    } else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
           inlink->w, inlink->h, av_get_pix_fmt_name(inlink->format),
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
           scale->flags);
    return 0;

fail:
    return ret;
}

static int config_props_ref(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[1];

    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[0]);
}

static int request_frame_ref(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[1]);
}
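
/*
 * Scale one horizontal band of the input. "mul" and "field" implement
 * field-based scaling for interlaced material: with mul=2 the strides are
 * doubled so that only every other line is read/written, and field selects
 * the top (0) or bottom (1) field. Paletted formats keep their palette in
 * plane 1, which must not be offset.
 */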
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
{
    ScaleContext *scale = link->dst->priv;
    const uint8_t *in[4];
    uint8_t *out[4];
    int in_stride[4], out_stride[4];
    int i;

    for (i = 0; i < 4; i++) {
        int vsub = ((i+1)&2) ? scale->vsub : 0;
        in_stride[i]  = cur_pic->linesize[i] * mul;
        out_stride[i] = out_buf->linesize[i] * mul;
        in[i]  = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
        out[i] = out_buf->data[i] + field * out_buf->linesize[i];
    }
    if (scale->input_is_pal)
        in[1]  = cur_pic->data[1];
    if (scale->output_is_pal)
        out[1] = out_buf->data[1];

    return sws_scale(sws, in, in_stride, y/mul, h,
                     out, out_stride);
}
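
/*
 * Per-frame entry point. If the incoming frame no longer matches the
 * negotiated link properties, the filter reconfigures itself via
 * config_props(); it then applies the requested color matrix / range to the
 * scaler contexts and scales the frame either as two interlaced fields, as
 * several horizontal slices (debug), or in a single pass.
 */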
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    ScaleContext *scale = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFrame *out;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    char buf[32];
    int in_range;

    if (av_frame_get_colorspace(in) == AVCOL_SPC_YCGCO)
        av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");

    if (   in->width  != link->w
        || in->height != link->h
        || in->format != link->format
        || in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || in->sample_aspect_ratio.num != link->sample_aspect_ratio.num) {
        int ret;

        if (scale->eval_mode == EVAL_MODE_INIT) {
            snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
            av_opt_set(scale, "w", buf, 0);
            snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
            av_opt_set(scale, "h", buf, 0);
        }

        link->dst->inputs[0]->format = in->format;
        link->dst->inputs[0]->w      = in->width;
        link->dst->inputs[0]->h      = in->height;

        link->dst->inputs[0]->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
        link->dst->inputs[0]->sample_aspect_ratio.num = in->sample_aspect_ratio.num;

        if ((ret = config_props(outlink)) < 0)
            return ret;
    }

    if (!scale->sws)
        return ff_filter_frame(outlink, in);

    scale->hsub = desc->log2_chroma_w;
    scale->vsub = desc->log2_chroma_h;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);
    out->width  = outlink->w;
    out->height = outlink->h;

    if (scale->output_is_pal)
        avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);

    in_range = av_frame_get_color_range(in);

    if (   scale->in_color_matrix
        || scale->out_color_matrix
        || scale->in_range != AVCOL_RANGE_UNSPECIFIED
        || in_range != AVCOL_RANGE_UNSPECIFIED
        || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
        int in_full, out_full, brightness, contrast, saturation;
        const int *inv_table, *table;

        sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
                                 (int **)&table, &out_full,
                                 &brightness, &contrast, &saturation);

        if (scale->in_color_matrix)
            inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
        if (scale->out_color_matrix)
            table = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
        else if (scale->in_color_matrix)
            table = inv_table;

        if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (scale->in_range == AVCOL_RANGE_JPEG);
        else if (in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (in_range == AVCOL_RANGE_JPEG);
        if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
            out_full = (scale->out_range == AVCOL_RANGE_JPEG);

        sws_setColorspaceDetails(scale->sws, inv_table, in_full,
                                 table, out_full,
                                 brightness, contrast, saturation);
        if (scale->isws[0])
            sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);
        if (scale->isws[1])
            sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);

        av_frame_set_color_range(out, out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG);
    }

    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    if (scale->interlaced > 0 || (scale->interlaced < 0 && in->interlaced_frame)) {
        scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
        scale_slice(link, out, in, scale->isws[1], 0,  link->h   /2, 2, 1);
    } else if (scale->nb_slices) {
        int i, slice_h, slice_start, slice_end = 0;
        const int nb_slices = FFMIN(scale->nb_slices, link->h);
        for (i = 0; i < nb_slices; i++) {
            slice_start = slice_end;
            slice_end   = (link->h * (i+1)) / nb_slices;
            slice_h     = slice_end - slice_start;
            scale_slice(link, out, in, scale->sws, slice_start, slice_h, 1, 0);
        }
    } else {
        scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
{
    AVFilterLink *outlink = link->dst->outputs[1];

    return ff_filter_frame(outlink, in);
}
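
/*
 * Runtime "w"/"width"/"h"/"height" commands: store the new expression and
 * re-run config_props(); on failure the previously parsed dimensions are
 * restored.
 */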
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    if (   !strcmp(cmd, "width")  || !strcmp(cmd, "w")
        || !strcmp(cmd, "height") || !strcmp(cmd, "h")) {

        int old_w = scale->w;
        int old_h = scale->h;
        AVFilterLink *outlink = ctx->outputs[0];

        av_opt_set(scale, cmd, args, 0);
        if ((ret = config_props(outlink)) < 0) {
            scale->w = old_w;
            scale->h = old_h;
        }
    } else
        ret = AVERROR(ENOSYS);

    return ret;
}

static const AVClass *child_class_next(const AVClass *prev)
{
    return prev ? NULL : sws_get_class();
}
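
/*
 * Option table shared by scale and scale2ref. The width/height expressions
 * accept the special values documented on ScaleContext.w/h; illustrative
 * examples (not taken from this file): "scale=640:-1" keeps the input aspect
 * ratio, and "scale=iw/2:-2" halves the width while forcing the height to be
 * divisible by 2.
 */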
#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption scale_options[] = {
    { "w",      "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "width",  "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "h",      "Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "height", "Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "flags",  "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
    { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS },
    { "size",   "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    { "s",      "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    { "in_color_matrix",  "set input YCbCr type",  OFFSET(in_color_matrix),  AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
    { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL },   .flags = FLAGS },
    { "in_range",  "set input color range",  OFFSET(in_range),  AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "mpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "tv",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "pc",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "in_v_chr_pos",  "input vertical chroma position in luma grid/256",    OFFSET(in_v_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "in_h_chr_pos",  "input horizontal chroma position in luma grid/256",  OFFSET(in_h_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_v_chr_pos", "output vertical chroma position in luma grid/256",   OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
    { "disable",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
    { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "nb_slices", "set the number of slices (debug purpose only)", OFFSET(nb_slices), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
         { "init",  "eval expressions once during initialization",          0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
         { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },
    { NULL }
};

static const AVClass scale_class = {
    .class_name       = "scale",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_next = child_class_next,
};

static const AVFilterPad avfilter_vf_scale_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_scale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};

AVFilter ff_vf_scale = {
    .name            = "scale",
    .description     = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
    .init_dict       = init_dict,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .priv_size       = sizeof(ScaleContext),
    .priv_class      = &scale_class,
    .inputs          = avfilter_vf_scale_inputs,
    .outputs         = avfilter_vf_scale_outputs,
    .process_command = process_command,
};

static const AVClass scale2ref_class = {
    .class_name       = "scale2ref",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_next = child_class_next,
};

static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    {
        .name         = "ref",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_ref,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props,
        .request_frame = request_frame,
    },
    {
        .name          = "ref",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props_ref,
        .request_frame = request_frame_ref,
    },
    { NULL }
};

AVFilter ff_vf_scale2ref = {
    .name            = "scale2ref",
    .description     = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
    .init_dict       = init_dict,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .priv_size       = sizeof(ScaleContext),
    .priv_class      = &scale2ref_class,
    .inputs          = avfilter_vf_scale2ref_inputs,
    .outputs         = avfilter_vf_scale2ref_outputs,
    .process_command = process_command,
};