* commit '59ee9f78b0cc4fb84ae606fa317d8102ad32a627':
  lavfi: do not use av_pix_fmt_descriptors directly.

Conflicts:
    libavfilter/buffersrc.c
    libavfilter/drawutils.c
    libavfilter/filtfmts.c
    libavfilter/vf_ass.c
    libavfilter/vf_boxblur.c
    libavfilter/vf_drawtext.c
    libavfilter/vf_lut.c
    libavfilter/vf_pad.c
    libavfilter/vf_scale.c
    libavfilter/vf_showinfo.c
    libavfilter/vf_transpose.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
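
Every hunk below applies the same mechanical substitution: direct indexing of the av_pix_fmt_descriptors[] table becomes a call to av_pix_fmt_desc_get(), and av_pix_fmt_descriptors[fmt].name becomes av_get_pix_fmt_name(fmt). As a rough, self-contained sketch of the accessor-based usage (illustration only, not part of the patch; the describe() helper and the formats picked here are arbitrary):

    /* Minimal sketch, assuming only the public libavutil/pixdesc.h API
     * that this commit switches to. */
    #include <stdio.h>
    #include <libavutil/pixdesc.h>

    static void describe(enum AVPixelFormat fmt)
    {
        /* av_pix_fmt_desc_get() replaces &av_pix_fmt_descriptors[fmt];
         * it returns NULL for invalid formats, so check before dereferencing. */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!desc)
            return;
        printf("%s: %d component(s), chroma subsampled %dx%d\n",
               av_get_pix_fmt_name(fmt), /* replaces av_pix_fmt_descriptors[fmt].name */
               desc->nb_components,
               1 << desc->log2_chroma_w,
               1 << desc->log2_chroma_h);
    }

    int main(void)
    {
        describe(AV_PIX_FMT_YUV420P);
        describe(AV_PIX_FMT_RGB24);
        return 0;
    }

The usual motivation for hiding the table behind an accessor is that libavutil can then extend or reorder the descriptor array without breaking the library ABI, which is presumably why direct access to av_pix_fmt_descriptors is being phased out here.
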
@@ -311,7 +311,7 @@ void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
         ff_tlog(ctx,
                 "link[%p s:%dx%d fmt:%s %s->%s]%s",
                 link, link->w, link->h,
-                av_pix_fmt_descriptors[link->format].name,
+                av_get_pix_fmt_name(link->format),
                 link->src ? link->src->filter->name : "",
                 link->dst ? link->dst->filter->name : "",
                 end ? "\n" : "");
@@ -226,7 +226,7 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args)
     }
 
     av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
-           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
+           c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
            c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
            c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
     c->warning_limit = 100;
@@ -55,7 +55,7 @@ int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t
 {
     uint8_t rgba_map[4] = {0};
     int i;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
     int hsub = pix_desc->log2_chroma_w;
 
     *is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0;
@@ -134,7 +134,7 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
 
 int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
 {
-    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[format];
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
     const AVComponentDescriptor *c;
     unsigned i, nb_planes = 0;
     int pixelstep[MAX_PLANES] = { 0 };
@@ -525,7 +525,7 @@ int main(void)
     int r, i;
 
     for (f = 0; f < AV_PIX_FMT_NB; f++) {
-        desc = &av_pix_fmt_descriptors[f];
+        desc = av_pix_fmt_desc_get(f);
         if (!desc->name)
             continue;
         printf("Testing %s...%*s", desc->name,
@@ -268,10 +268,12 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type)
     int num_formats = type == AVMEDIA_TYPE_VIDEO ? AV_PIX_FMT_NB :
                       type == AVMEDIA_TYPE_AUDIO ? AV_SAMPLE_FMT_NB : 0;
 
-    for (fmt = 0; fmt < num_formats; fmt++)
+    for (fmt = 0; fmt < num_formats; fmt++) {
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
         if ((type != AVMEDIA_TYPE_VIDEO) ||
-            (type == AVMEDIA_TYPE_VIDEO && !(av_pix_fmt_descriptors[fmt].flags & PIX_FMT_HWACCEL)))
+            (type == AVMEDIA_TYPE_VIDEO && !(desc->flags & PIX_FMT_HWACCEL)))
             ff_add_format(&ret, fmt);
+    }
 
     return ret;
 }
@@ -139,9 +139,9 @@ static int query_formats(AVFilterContext *ctx)
 
 static int config_input(AVFilterLink *inlink)
 {
-    AVFilterContext *ctx = inlink->dst;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+    AVFilterContext *ctx = inlink->dst;
     BoxBlurContext *boxblur = ctx->priv;
-    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
     int w = inlink->w, h = inlink->h;
     int cw, ch;
     double var_values[VARS_NB], res;
@@ -165,7 +165,7 @@ static int config_input(AVFilterLink *link)
 {
     AVFilterContext *ctx = link->dst;
     CropContext *crop = ctx->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[link->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(link->format);
     int ret;
     const char *expr;
     double res;
@@ -186,8 +186,8 @@ static int config_input(AVFilterLink *link)
     crop->var_values[VAR_POS] = NAN;
 
     av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
-    crop->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
-    crop->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    crop->hsub = pix_desc->log2_chroma_w;
+    crop->vsub = pix_desc->log2_chroma_h;
 
     if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr),
                                       var_names, crop->var_values,
@@ -267,6 +267,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     AVFilterContext *ctx = link->dst;
     CropContext *crop = ctx->priv;
     AVFilterBufferRef *ref2;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
     int i;
 
     ref2 = avfilter_ref_buffer(picref, ~0);
@@ -300,8 +301,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     ref2->data[0] += crop->y * ref2->linesize[0];
     ref2->data[0] += crop->x * crop->max_step[0];
 
-    if (!(av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PAL ||
-          av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PSEUDOPAL)) {
+    if (!(desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL)) {
         for (i = 1; i < 3; i ++) {
             if (ref2->data[i]) {
                 ref2->data[i] += (crop->y >> crop->vsub) * ref2->linesize[i];
@@ -107,7 +107,7 @@ static int config_input(AVFilterLink *inlink)
     CropDetectContext *cd = ctx->priv;
 
     av_image_fill_max_pixsteps(cd->max_pixsteps, NULL,
-                               &av_pix_fmt_descriptors[inlink->format]);
+                               av_pix_fmt_desc_get(inlink->format));
 
     cd->x1 = inlink->w - 1;
     cd->y1 = inlink->h - 1;
@@ -220,9 +220,10 @@ static int end_frame(AVFilterLink *inlink)
     AVFilterLink *outlink = inlink->dst->outputs[0];
     AVFilterBufferRef *inpicref = inlink ->cur_buf;
     AVFilterBufferRef *outpicref = outlink->out_buf;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int direct = inpicref->buf == outpicref->buf;
-    int hsub0 = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    int vsub0 = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    int hsub0 = desc->log2_chroma_w;
+    int vsub0 = desc->log2_chroma_h;
     int plane;
     int ret;
 
@@ -81,9 +81,10 @@ static int query_formats(AVFilterContext *ctx)
 static int config_input(AVFilterLink *inlink)
 {
     DrawBoxContext *drawbox = inlink->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
-    drawbox->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    drawbox->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    drawbox->hsub = desc->log2_chroma_w;
+    drawbox->vsub = desc->log2_chroma_h;
 
     if (drawbox->w == 0) drawbox->w = inlink->w;
     if (drawbox->h == 0) drawbox->h = inlink->h;
@@ -179,7 +179,7 @@ static enum AVPixelFormat alpha_pix_fmts[] = {
 static int config_props(AVFilterLink *inlink)
 {
     FadeContext *fade = inlink->dst->priv;
-    const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
 
     fade->hsub = pixdesc->log2_chroma_w;
     fade->vsub = pixdesc->log2_chroma_h;
@@ -78,15 +78,16 @@ static int query_formats(AVFilterContext *ctx)
      * a bitstream format, and does not have vertically sub-sampled chroma */
     if (ctx->inputs[0]) {
         formats = NULL;
-        for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
-            if (!( av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL
-                || av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BITSTREAM)
-                && av_pix_fmt_descriptors[pix_fmt].nb_components
-                && !av_pix_fmt_descriptors[pix_fmt].log2_chroma_h
-                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
+        for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++) {
+            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+            if (!(desc->flags & PIX_FMT_HWACCEL ||
+                  desc->flags & PIX_FMT_BITSTREAM) &&
+                desc->nb_components && !desc->log2_chroma_h &&
+                (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                 ff_formats_unref(&formats);
                 return ret;
             }
+        }
         ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
         ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
     }
@@ -167,8 +167,9 @@ static int query_formats(AVFilterContext *ctx)
 static int config_input(AVFilterLink *inlink)
 {
     GradFunContext *gf = inlink->dst->priv;
-    int hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    int vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+    int hsub = desc->log2_chroma_w;
+    int vsub = desc->log2_chroma_h;
 
     gf->buf = av_mallocz((FFALIGN(inlink->w, 16) * (gf->radius + 1) / 2 + 32) * sizeof(uint16_t));
     if (!gf->buf)
@@ -77,11 +77,11 @@ static int query_formats(AVFilterContext *ctx)
 static int config_props(AVFilterLink *inlink)
 {
     FlipContext *flip = inlink->dst->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
     av_image_fill_max_pixsteps(flip->max_step, NULL, pix_desc);
-    flip->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    flip->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    flip->hsub = pix_desc->log2_chroma_w;
+    flip->vsub = pix_desc->log2_chroma_h;
 
     return 0;
 }
@@ -294,11 +294,12 @@ static int query_formats(AVFilterContext *ctx)
 static int config_input(AVFilterLink *inlink)
 {
     HQDN3DContext *hqdn3d = inlink->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int i;
 
-    hqdn3d->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    hqdn3d->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
-    hqdn3d->depth = av_pix_fmt_descriptors[inlink->format].comp[0].depth_minus1+1;
+    hqdn3d->hsub = desc->log2_chroma_w;
+    hqdn3d->vsub = desc->log2_chroma_h;
+    hqdn3d->depth = desc->comp[0].depth_minus1+1;
 
     hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
     if (!hqdn3d->line)
@@ -174,7 +174,7 @@ static int config_props(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     LutContext *lut = ctx->priv;
-    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int rgba_map[4]; /* component index -> RGBA color index map */
     int min[4], max[4];
     int val, comp, ret;
@@ -197,7 +197,7 @@ static const enum AVPixelFormat alpha_pix_fmts[] = {
 static int config_input_main(AVFilterLink *inlink)
 {
     OverlayContext *over = inlink->dst->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
     av_image_fill_max_pixsteps(over->main_pix_step, NULL, pix_desc);
 
@@ -249,10 +249,10 @@ static int config_input_overlay(AVFilterLink *inlink)
     av_log(ctx, AV_LOG_VERBOSE,
            "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n",
            ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
-           av_pix_fmt_descriptors[ctx->inputs[MAIN]->format].name,
+           av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
            over->x, over->y,
            ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
-           av_pix_fmt_descriptors[ctx->inputs[OVERLAY]->format].name);
+           av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
 
     if (over->x < 0 || over->y < 0 ||
         over->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
@@ -44,7 +44,7 @@ static int config_props(AVFilterLink *inlink)
 {
     PixdescTestContext *priv = inlink->dst->priv;
 
-    priv->pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    priv->pix_desc = av_pix_fmt_desc_get(inlink->format);
 
     if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w)))
         return AVERROR(ENOMEM);
@@ -163,6 +163,7 @@ static int config_props(AVFilterLink *outlink)
     AVFilterLink *inlink = outlink->src->inputs[0];
     enum AVPixelFormat outfmt = outlink->format;
     ScaleContext *scale = ctx->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int64_t w, h;
     double var_values[VARS_NB], res;
     char *expr;
@@ -176,8 +177,8 @@ static int config_props(AVFilterLink *outlink)
     var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
         (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
     var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
-    var_values[VAR_HSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    var_values[VAR_VSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
+    var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
 
     /* evaluate width and height */
     av_expr_parse_and_eval(&res, (expr = scale->w_expr),
@@ -226,11 +227,11 @@ static int config_props(AVFilterLink *outlink)
 
     /* TODO: make algorithm configurable */
 
-    scale->input_is_pal = av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PAL ||
-                          av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PSEUDOPAL;
+    scale->input_is_pal = desc->flags & PIX_FMT_PAL ||
+                          desc->flags & PIX_FMT_PSEUDOPAL;
     if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
-    scale->output_is_pal = av_pix_fmt_descriptors[outfmt].flags & PIX_FMT_PAL ||
-                           av_pix_fmt_descriptors[outfmt].flags & PIX_FMT_PSEUDOPAL;
+    scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & PIX_FMT_PAL ||
+                           av_pix_fmt_desc_get(outfmt)->flags & PIX_FMT_PSEUDOPAL;
 
     if (scale->sws)
         sws_freeContext(scale->sws);
@@ -261,9 +262,9 @@ static int config_props(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
 
     av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
-           inlink ->w, inlink ->h, av_pix_fmt_descriptors[ inlink->format].name,
+           inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
            inlink->sample_aspect_ratio.num, inlink->sample_aspect_ratio.den,
-           outlink->w, outlink->h, av_pix_fmt_descriptors[outlink->format].name,
+           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
            outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
            scale->flags);
     return 0;
@@ -281,6 +282,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     ScaleContext *scale = link->dst->priv;
     AVFilterLink *outlink = link->dst->outputs[0];
     AVFilterBufferRef *outpicref, *for_next_filter;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
     int ret = 0;
 
     if( picref->video->w != link->w
@@ -306,8 +308,8 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
         return ff_start_frame(outlink, outpicref);
     }
 
-    scale->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
-    scale->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    scale->hsub = desc->log2_chroma_w;
+    scale->vsub = desc->log2_chroma_h;
 
     outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h);
     if (!outpicref)
@@ -47,8 +47,9 @@ static int end_frame(AVFilterLink *inlink)
     AVFilterContext *ctx = inlink->dst;
     ShowInfoContext *showinfo = ctx->priv;
     AVFilterBufferRef *picref = inlink->cur_buf;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     uint32_t plane_checksum[4] = {0}, checksum = 0;
-    int i, plane, vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    int i, plane, vsub = desc->log2_chroma_h;
 
     for (plane = 0; picref->data[plane] && plane < 4; plane++) {
         size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
@@ -68,7 +69,7 @@ static int end_frame(AVFilterLink *inlink)
            "checksum:%08X plane_checksum:[%08X",
            showinfo->frame,
            av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base), picref->pos,
-           av_pix_fmt_descriptors[picref->format].name,
+           desc->name,
            picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
            picref->video->w, picref->video->h,
            !picref->video->interlaced ? 'P' : /* Progressive */
@@ -54,8 +54,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
 static int config_props(AVFilterLink *link)
 {
     SliceContext *slice = link->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
-    slice->vshift = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    slice->vshift = desc->log2_chroma_h;
 
     return 0;
 }
@@ -116,7 +116,8 @@ static int config_props_output(AVFilterLink *outlink)
     AVFilterContext *ctx = outlink->src;
     TransContext *trans = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
-    const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[outlink->format];
+    const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
+    const AVPixFmtDescriptor *desc_in = av_pix_fmt_desc_get(inlink->format);
 
     if (trans->dir&4) {
         av_log(ctx, AV_LOG_WARNING,
@@ -135,10 +136,10 @@ static int config_props_output(AVFilterLink *outlink)
         trans->passthrough = TRANSPOSE_PT_TYPE_NONE;
     }
 
-    trans->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    trans->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    trans->hsub = desc_in->log2_chroma_w;
+    trans->vsub = desc_in->log2_chroma_h;
 
-    av_image_fill_max_pixsteps(trans->pixsteps, NULL, pixdesc);
+    av_image_fill_max_pixsteps(trans->pixsteps, NULL, desc_out);
 
     outlink->w = inlink->h;
     outlink->h = inlink->w;
@@ -187,9 +187,10 @@ static void init_filter_param(AVFilterContext *ctx, FilterParam *fp, const char
 static int config_props(AVFilterLink *link)
 {
     UnsharpContext *unsharp = link->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
-    unsharp->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
-    unsharp->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    unsharp->hsub = desc->log2_chroma_w;
+    unsharp->vsub = desc->log2_chroma_h;
 
     init_filter_param(link->dst, &unsharp->luma, "luma", link->w);
     init_filter_param(link->dst, &unsharp->chroma, "chroma", SHIFTUP(link->w, unsharp->hsub));
@@ -36,8 +36,9 @@ typedef struct {
 static int config_input(AVFilterLink *link)
 {
     FlipContext *flip = link->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
-    flip->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    flip->vsub = desc->log2_chroma_h;
 
     return 0;
 }
@@ -190,7 +190,7 @@ static int return_frame(AVFilterContext *ctx, int is_second)
     }
 
     if (!yadif->csp)
-        yadif->csp = &av_pix_fmt_descriptors[link->format];
+        yadif->csp = av_pix_fmt_desc_get(link->format);
     if (yadif->csp->comp[0].depth_minus1 / 8 == 1)
         yadif->filter_line = (void*)filter_line_c_16bit;
 
@@ -373,7 +373,8 @@ int ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
 
     /* copy the slice if needed for permission reasons */
     if (link->src_buf) {
-        vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+        vsub = desc->log2_chroma_h;
 
         for (i = 0; i < 4; i++) {
             if (link->src_buf->data[i]) {