/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter
 */

#include <stdio.h>
#include <string.h>

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "libswscale/swscale.h"
static const char *const var_names[] = {
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "a",
    "sar",
    "dar",
    "hsub",
    "vsub",
    "ohsub",
    "ovsub",
    NULL
};

enum var_name {
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VAR_OHSUB,
    VAR_OVSUB,
    VARS_NB
};
typedef struct ScaleContext {
    const AVClass *class;
    struct SwsContext *sws;     ///< software scaler context
    struct SwsContext *isws[2]; ///< software scaler context for interlaced material
    AVDictionary *opts;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     *  -N = try to keep aspect but make sure it is divisible by N
     */
    int w, h;
    char *size_str;
    unsigned int flags;         ///< sws flags
    double param[2];            ///< sws params

    int hsub, vsub;             ///< chroma subsampling
    int slice_y;                ///< top of current output slice
    int input_is_pal;           ///< set to 1 if the input format is paletted
    int output_is_pal;          ///< set to 1 if the output format is paletted
    int interlaced;

    char *w_expr;               ///< width expression string
    char *h_expr;               ///< height expression string
    char *flags_str;

    char *in_color_matrix;
    char *out_color_matrix;

    int in_range;
    int out_range;

    int out_h_chr_pos;
    int out_v_chr_pos;
    int in_h_chr_pos;
    int in_v_chr_pos;

    int force_original_aspect_ratio;
} ScaleContext;
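/* Forward declaration of the scale2ref filter: config_props() compares
 * ctx->filter against it to decide whether the expression variables
 * (in_w/in_h, etc.) should be read from the second ("ref") input pad
 * instead of the main input. */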
AVFilter ff_vf_scale2ref;

static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    if (scale->size_str && (scale->w_expr || scale->h_expr)) {
        av_log(ctx, AV_LOG_ERROR,
               "Size and width/height expressions cannot be set at the same time.\n");
        return AVERROR(EINVAL);
    }

    if (scale->w_expr && !scale->h_expr)
        FFSWAP(char *, scale->w_expr, scale->size_str);

    if (scale->size_str) {
        char buf[32];
        if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid size '%s'\n", scale->size_str);
            return ret;
        }
        snprintf(buf, sizeof(buf)-1, "%d", scale->w);
        av_opt_set(scale, "w", buf, 0);
        snprintf(buf, sizeof(buf)-1, "%d", scale->h);
        av_opt_set(scale, "h", buf, 0);
    }
    if (!scale->w_expr)
        av_opt_set(scale, "w", "iw", 0);
    if (!scale->h_expr)
        av_opt_set(scale, "h", "ih", 0);

    av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
           scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);

    scale->flags = 0;

    if (scale->flags_str) {
        const AVClass *class = sws_get_class();
        const AVOption    *o = av_opt_find(&class, "sws_flags", NULL, 0,
                                           AV_OPT_SEARCH_FAKE_OBJ);
        int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
        if (ret < 0)
            return ret;
    }
    scale->opts = *opts;
    *opts = NULL;

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    sws_freeContext(scale->sws);
    sws_freeContext(scale->isws[0]);
    sws_freeContext(scale->isws[1]);
    scale->sws = NULL;
    av_dict_free(&scale->opts);
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    enum AVPixelFormat pix_fmt;
    int ret;

    if (ctx->inputs[0]) {
        const AVPixFmtDescriptor *desc = NULL;
        formats = NULL;
        while ((desc = av_pix_fmt_desc_next(desc))) {
            pix_fmt = av_pix_fmt_desc_get_id(desc);
            if ((sws_isSupportedInput(pix_fmt) ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                return ret;
            }
        }
        if ((ret = ff_formats_ref(formats, &ctx->inputs[0]->out_formats)) < 0)
            return ret;
    }
    if (ctx->outputs[0]) {
        const AVPixFmtDescriptor *desc = NULL;
        formats = NULL;
        while ((desc = av_pix_fmt_desc_next(desc))) {
            pix_fmt = av_pix_fmt_desc_get_id(desc);
            if ((sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8 ||
                 sws_isSupportedEndiannessConversion(pix_fmt))
                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                return ret;
            }
        }
        if ((ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0)
            return ret;
    }

    return 0;
}
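/* Map a color matrix name to the matching libswscale coefficient table.
 * "auto" (which matches none of the substrings below) keeps the colorspace
 * passed in, i.e. the frame's own; anything unknown or out of range falls
 * back to BT.601 (AVCOL_SPC_BT470BG). */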
static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
{
    if (!s)
        s = "bt601";

    if (s && strstr(s, "bt709")) {
        colorspace = AVCOL_SPC_BT709;
    } else if (s && strstr(s, "fcc")) {
        colorspace = AVCOL_SPC_FCC;
    } else if (s && strstr(s, "smpte240m")) {
        colorspace = AVCOL_SPC_SMPTE240M;
    } else if (s && (strstr(s, "bt601") || strstr(s, "bt470") || strstr(s, "smpte170m"))) {
        colorspace = AVCOL_SPC_BT470BG;
    }

    if (colorspace < 1 || colorspace > 7) {
        colorspace = AVCOL_SPC_BT470BG;
    }

    return sws_getCoefficients(colorspace);
}
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = outlink->src->inputs[0];
    AVFilterLink *inlink  = ctx->filter == &ff_vf_scale2ref ?
                            outlink->src->inputs[1] :
                            outlink->src->inputs[0];
    enum AVPixelFormat outfmt = outlink->format;
    ScaleContext *scale = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format);
    int64_t w, h;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;
    int factor_w, factor_h;

    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (double) inlink->w / inlink->h;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];
    var_values[VAR_HSUB]  = 1 << desc->log2_chroma_w;
    var_values[VAR_VSUB]  = 1 << desc->log2_chroma_h;
    var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w;
    var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h;

    /* evaluate width and height */
    av_expr_parse_and_eval(&res, (expr = scale->w_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    scale->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    scale->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
    /* evaluate again the width, as it may depend on the output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    scale->w = res;

    w = scale->w;
    h = scale->h;
    /* Check whether the result is requested to be divisible by some
     * factor (w or h = -n, with n being the factor). */
    factor_w = 1;
    factor_h = 1;
    if (w < -1) {
        factor_w = -w;
    }
    if (h < -1) {
        factor_h = -h;
    }

    if (w < 0 && h < 0)
        scale->w = scale->h = 0;

    if (!(w = scale->w))
        w = inlink->w;
    if (!(h = scale->h))
        h = inlink->h;

    /* Make sure that the result is divisible by the factor we determined
     * earlier. If no factor was set, nothing will happen as the default
     * factor is 1. */
    if (w < 0)
        w = av_rescale(h, inlink->w, inlink->h * factor_w) * factor_w;
    if (h < 0)
        h = av_rescale(w, inlink->h, inlink->w * factor_h) * factor_h;
    /* Note that force_original_aspect_ratio may overwrite the previously set
     * dimensions so that they are no longer divisible by the set factors. */
    if (scale->force_original_aspect_ratio) {
        int tmp_w = av_rescale(h, inlink->w, inlink->h);
        int tmp_h = av_rescale(w, inlink->h, inlink->w);

        if (scale->force_original_aspect_ratio == 1) {
            w = FFMIN(tmp_w, w);
            h = FFMIN(tmp_h, h);
        } else {
            w = FFMAX(tmp_w, w);
            h = FFMAX(tmp_h, h);
        }
    }

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX  ||
        (w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    /* TODO: make algorithm configurable */

    scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL ||
                          desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;
    if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
    scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PAL ||
                           av_pix_fmt_desc_get(outfmt)->flags & AV_PIX_FMT_FLAG_PSEUDOPAL;

    if (scale->sws)
        sws_freeContext(scale->sws);
    if (scale->isws[0])
        sws_freeContext(scale->isws[0]);
    if (scale->isws[1])
        sws_freeContext(scale->isws[1]);
    scale->isws[0] = scale->isws[1] = scale->sws = NULL;
    if (inlink0->w == outlink->w &&
        inlink0->h == outlink->h &&
        !scale->out_color_matrix &&
        scale->in_range == scale->out_range &&
        inlink0->format == outlink->format)
        ;
    else {
        struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
        int i;

        for (i = 0; i < 3; i++) {
            struct SwsContext **s = swscs[i];
            *s = sws_alloc_context();
            if (!*s)
                return AVERROR(ENOMEM);

            av_opt_set_int(*s, "srcw", inlink0->w, 0);
            av_opt_set_int(*s, "srch", inlink0->h >> !!i, 0);
            av_opt_set_int(*s, "src_format", inlink0->format, 0);
            av_opt_set_int(*s, "dstw", outlink->w, 0);
            av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
            av_opt_set_int(*s, "dst_format", outfmt, 0);
            av_opt_set_int(*s, "sws_flags", scale->flags, 0);
            av_opt_set_int(*s, "param0", scale->param[0], 0);
            av_opt_set_int(*s, "param1", scale->param[1], 0);
            if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
                av_opt_set_int(*s, "src_range",
                               scale->in_range == AVCOL_RANGE_JPEG, 0);
            if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
                av_opt_set_int(*s, "dst_range",
                               scale->out_range == AVCOL_RANGE_JPEG, 0);

            if (scale->opts) {
                AVDictionaryEntry *e = NULL;

                while ((e = av_dict_get(scale->opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
                    if ((ret = av_opt_set(*s, e->key, e->value, 0)) < 0)
                        return ret;
                }
            }
            /* Override YUV420P default settings to have the correct (MPEG-2) chroma positions.
             * MPEG-2 chroma positions are used by convention.
             * XXX: support other 4:2:0 pixel formats */
            if (inlink0->format == AV_PIX_FMT_YUV420P && scale->in_v_chr_pos == -513) {
                scale->in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
            }

            if (outlink->format == AV_PIX_FMT_YUV420P && scale->out_v_chr_pos == -513) {
                scale->out_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
            }

            av_opt_set_int(*s, "src_h_chr_pos", scale->in_h_chr_pos, 0);
            av_opt_set_int(*s, "src_v_chr_pos", scale->in_v_chr_pos, 0);
            av_opt_set_int(*s, "dst_h_chr_pos", scale->out_h_chr_pos, 0);
            av_opt_set_int(*s, "dst_v_chr_pos", scale->out_v_chr_pos, 0);

            if ((ret = sws_init_context(*s, NULL, NULL)) < 0)
                return ret;
            if (!scale->interlaced)
                break;
        }
    }

    if (inlink->sample_aspect_ratio.num) {
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
    } else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
           inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
           inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
           outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
           scale->flags);
    return 0;

fail:
    av_log(NULL, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'.\n"
           "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
           expr, scale->w_expr, scale->h_expr);
    return ret;
}
static int config_props_ref(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[1];

    outlink->w = inlink->w;
    outlink->h = inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->time_base = inlink->time_base;

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[0]);
}

static int request_frame_ref(AVFilterLink *outlink)
{
    return ff_request_frame(outlink->src->inputs[1]);
}
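/* Scale one slice of the frame.  For interlaced scaling the caller passes
 * mul=2 and field=0/1: the line strides are doubled so each SwsContext only
 * sees the lines of one field, and the field offset selects the top or
 * bottom field. */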
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
{
    ScaleContext *scale = link->dst->priv;
    const uint8_t *in[4];
    uint8_t *out[4];
    int in_stride[4], out_stride[4];
    int i;

    for (i = 0; i < 4; i++) {
        int vsub = ((i + 1) & 2) ? scale->vsub : 0;
        in_stride[i]  = cur_pic->linesize[i] * mul;
        out_stride[i] = out_buf->linesize[i] * mul;
        in[i]  = cur_pic->data[i] + ((y >> vsub) + field) * cur_pic->linesize[i];
        out[i] = out_buf->data[i] + field * out_buf->linesize[i];
    }
    if (scale->input_is_pal)
        in[1]  = cur_pic->data[1];
    if (scale->output_is_pal)
        out[1] = out_buf->data[1];

    return sws_scale(sws, in, in_stride, y / mul, h,
                     out, out_stride);
}
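/* If an incoming frame's size or format no longer matches the negotiated
 * link properties (e.g. a mid-stream change), filter_frame() updates the
 * input link and re-runs config_props() before scaling; when no SwsContext
 * was needed the frame is passed through untouched. */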
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    ScaleContext *scale = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFrame *out;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    char buf[32];
    int in_range;

    if (av_frame_get_colorspace(in) == AVCOL_SPC_YCGCO)
        av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");

    if (   in->width  != link->w
        || in->height != link->h
        || in->format != link->format) {
        int ret;
        snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
        av_opt_set(scale, "w", buf, 0);
        snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
        av_opt_set(scale, "h", buf, 0);

        link->dst->inputs[0]->format = in->format;
        link->dst->inputs[0]->w      = in->width;
        link->dst->inputs[0]->h      = in->height;

        if ((ret = config_props(outlink)) < 0)
            return ret;
    }

    if (!scale->sws)
        return ff_filter_frame(outlink, in);

    scale->hsub = desc->log2_chroma_w;
    scale->vsub = desc->log2_chroma_h;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);
    out->width  = outlink->w;
    out->height = outlink->h;

    if (scale->output_is_pal)
        avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);

    in_range = av_frame_get_color_range(in);

    if (   scale->in_color_matrix
        || scale->out_color_matrix
        || scale->in_range != AVCOL_RANGE_UNSPECIFIED
        || in_range != AVCOL_RANGE_UNSPECIFIED
        || scale->out_range != AVCOL_RANGE_UNSPECIFIED) {
        int in_full, out_full, brightness, contrast, saturation;
        const int *inv_table, *table;

        sws_getColorspaceDetails(scale->sws, (int **)&inv_table, &in_full,
                                 (int **)&table, &out_full,
                                 &brightness, &contrast, &saturation);

        if (scale->in_color_matrix)
            inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
        if (scale->out_color_matrix)
            table     = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
        else if (scale->in_color_matrix)
            table = inv_table;

        if (scale->in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (scale->in_range == AVCOL_RANGE_JPEG);
        else if (in_range != AVCOL_RANGE_UNSPECIFIED)
            in_full  = (in_range == AVCOL_RANGE_JPEG);
        if (scale->out_range != AVCOL_RANGE_UNSPECIFIED)
            out_full = (scale->out_range == AVCOL_RANGE_JPEG);

        sws_setColorspaceDetails(scale->sws, inv_table, in_full,
                                 table, out_full,
                                 brightness, contrast, saturation);
        if (scale->isws[0])
            sws_setColorspaceDetails(scale->isws[0], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);
        if (scale->isws[1])
            sws_setColorspaceDetails(scale->isws[1], inv_table, in_full,
                                     table, out_full,
                                     brightness, contrast, saturation);
    }

    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    if (scale->interlaced > 0 || (scale->interlaced < 0 && in->interlaced_frame)) {
        scale_slice(link, out, in, scale->isws[0], 0, (link->h + 1) / 2, 2, 0);
        scale_slice(link, out, in, scale->isws[1], 0,  link->h      / 2, 2, 1);
    } else {
        scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
{
    AVFilterLink *outlink = link->dst->outputs[1];

    return ff_filter_frame(outlink, in);
}

static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    ScaleContext *scale = ctx->priv;
    int ret;

    if (   !strcmp(cmd, "width")  || !strcmp(cmd, "w")
        || !strcmp(cmd, "height") || !strcmp(cmd, "h")) {

        int old_w = scale->w;
        int old_h = scale->h;
        AVFilterLink *outlink = ctx->outputs[0];

        av_opt_set(scale, cmd, args, 0);
        if ((ret = config_props(outlink)) < 0) {
            scale->w = old_w;
            scale->h = old_h;
        }
    } else
        ret = AVERROR(ENOSYS);

    return ret;
}
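/* Expose libswscale's option class as a child class, so swscale AVOptions
 * can be set on and enumerated through the filter's private class. */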
static const AVClass *child_class_next(const AVClass *prev)
{
    return prev ? NULL : sws_get_class();
}
#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption scale_options[] = {
    { "w",      "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "width",  "Output video width",          OFFSET(w_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "h",      "Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "height", "Output video height",         OFFSET(h_expr),    AV_OPT_TYPE_STRING,        .flags = FLAGS },
    { "flags",  "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
    { "interl", "set interlacing",             OFFSET(interlaced), AV_OPT_TYPE_BOOL,  {.i64 = 0 }, -1, 1, FLAGS },
    { "size",   "set video size",              OFFSET(size_str),  AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    { "s",      "set video size",              OFFSET(size_str),  AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
    {  "in_color_matrix", "set input YCbCr type",  OFFSET(in_color_matrix),  AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
    { "out_color_matrix", "set output YCbCr type", OFFSET(out_color_matrix), AV_OPT_TYPE_STRING, { .str = NULL },   .flags = FLAGS },
    {  "in_range", "set input color range",  OFFSET( in_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "out_range", "set output color range", OFFSET(out_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 2, FLAGS, "range" },
    { "auto",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, 0, FLAGS, "range" },
    { "full",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "jpeg",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    { "mpeg",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "tv",     NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
    { "pc",     NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
    {  "in_v_chr_pos", "input vertical chroma position in luma grid/256",    OFFSET(in_v_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    {  "in_h_chr_pos", "input horizontal chroma position in luma grid/256",  OFFSET(in_h_chr_pos),  AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_v_chr_pos", "output vertical chroma position in luma grid/256",   OFFSET(out_v_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "out_h_chr_pos", "output horizontal chroma position in luma grid/256", OFFSET(out_h_chr_pos), AV_OPT_TYPE_INT, { .i64 = -513}, -513, 512, FLAGS },
    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
    { "disable",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
    { "param0", "Scaler param 0", OFFSET(param[0]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { "param1", "Scaler param 1", OFFSET(param[1]), AV_OPT_TYPE_DOUBLE, { .dbl = SWS_PARAM_DEFAULT }, INT_MIN, INT_MAX, FLAGS },
    { NULL }
};
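/* Illustrative command lines showing how the "w"/"h" expressions and the
 * -1/-N special values documented in ScaleContext map to filter usage:
 *     ffmpeg -i in.mp4 -vf "scale=1280:-2" out.mp4
 *         (fixed width, height keeps the aspect ratio and stays divisible by 2)
 *     ffmpeg -i in.mp4 -vf "scale=iw/2:ih/2:flags=lanczos" out.mp4
 *         (half size, Lanczos scaling passed through the "flags" option) */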
static const AVClass scale_class = {
    .class_name       = "scale",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_next = child_class_next,
};

static const AVFilterPad avfilter_vf_scale_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_scale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
    },
    { NULL }
};

AVFilter ff_vf_scale = {
    .name            = "scale",
    .description     = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format."),
    .init_dict       = init_dict,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .priv_size       = sizeof(ScaleContext),
    .priv_class      = &scale_class,
    .inputs          = avfilter_vf_scale_inputs,
    .outputs         = avfilter_vf_scale_outputs,
    .process_command = process_command,
};
static const AVClass scale2ref_class = {
    .class_name       = "scale2ref",
    .item_name        = av_default_item_name,
    .option           = scale_options,
    .version          = LIBAVUTIL_VERSION_INT,
    .category         = AV_CLASS_CATEGORY_FILTER,
    .child_class_next = child_class_next,
};

static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    {
        .name         = "ref",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_ref,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props,
        .request_frame = request_frame,
    },
    {
        .name          = "ref",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_props_ref,
        .request_frame = request_frame_ref,
    },
    { NULL }
};

AVFilter ff_vf_scale2ref = {
    .name            = "scale2ref",
    .description     = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
    .init_dict       = init_dict,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .priv_size       = sizeof(ScaleContext),
    .priv_class      = &scale2ref_class,
    .inputs          = avfilter_vf_scale2ref_inputs,
    .outputs         = avfilter_vf_scale2ref_outputs,
    .process_command = process_command,
};