@@ -66,18 +66,12 @@ int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
     return ff_framesync_configure(&s->fs);
 }
 
-int ff_dualinput_filter_frame_main(FFDualInputContext *s,
+int ff_dualinput_filter_frame(FFDualInputContext *s,
                                    AVFilterLink *inlink, AVFrame *in)
 {
     return ff_framesync_filter_frame(&s->fs, inlink, in);
 }
 
-int ff_dualinput_filter_frame_second(FFDualInputContext *s,
-                                   AVFilterLink *inlink, AVFrame *in)
-{
-    return ff_framesync_filter_frame(&s->fs, inlink, in);
-}
-
 int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
 {
     return ff_framesync_request_frame(&s->fs, outlink);
@@ -39,8 +39,7 @@ typedef struct {
 } FFDualInputContext;
 
 int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s);
-int ff_dualinput_filter_frame_main(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
-int ff_dualinput_filter_frame_second(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
+int ff_dualinput_filter_frame(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
 int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink);
 void ff_dualinput_uninit(FFDualInputContext *s);
@@ -422,27 +422,21 @@ static int request_frame(AVFilterLink *outlink)
     return ff_dualinput_request_frame(&b->dinput, outlink);
 }
 
-static int filter_frame_top(AVFilterLink *inlink, AVFrame *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     BlendContext *b = inlink->dst->priv;
-    return ff_dualinput_filter_frame_main(&b->dinput, inlink, buf);
-}
-
-static int filter_frame_bottom(AVFilterLink *inlink, AVFrame *buf)
-{
-    BlendContext *b = inlink->dst->priv;
-    return ff_dualinput_filter_frame_second(&b->dinput, inlink, buf);
+    return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
 }
 
 static const AVFilterPad blend_inputs[] = {
     {
         .name = "top",
         .type = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame_top,
+        .filter_frame = filter_frame,
     },{
         .name = "bottom",
         .type = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame_bottom,
+        .filter_frame = filter_frame,
     },
     { NULL }
 };
@@ -667,16 +667,10 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+static int filter_frame_hald(AVFilterLink *inlink, AVFrame *inpicref)
 {
     LUT3DContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame_main(&s->dinput, inlink, inpicref);
-}
-
-static int filter_frame_clut(AVFilterLink *inlink, AVFrame *inpicref)
-{
-    LUT3DContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame_second(&s->dinput, inlink, inpicref);
+    return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
 }
 
 static int request_frame(AVFilterLink *outlink)
@@ -766,12 +760,12 @@ static const AVFilterPad haldclut_inputs[] = {
     {
         .name = "main",
         .type = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame_main,
+        .filter_frame = filter_frame_hald,
         .config_props = config_input,
     },{
         .name = "clut",
         .type = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame_clut,
+        .filter_frame = filter_frame_hald,
         .config_props = config_clut,
     },
     { NULL }
@@ -549,16 +549,10 @@ static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
     return mainpic;
 }
 
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
 {
     OverlayContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame_main(&s->dinput, inlink, inpicref);
-}
-
-static int filter_frame_over(AVFilterLink *inlink, AVFrame *inpicref)
-{
-    OverlayContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame_second(&s->dinput, inlink, inpicref);
+    return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
 }
 
 static int request_frame(AVFilterLink *outlink)
@@ -606,14 +600,14 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = {
         .name = "main",
         .type = AVMEDIA_TYPE_VIDEO,
         .config_props = config_input_main,
-        .filter_frame = filter_frame_main,
+        .filter_frame = filter_frame,
         .needs_writable = 1,
     },
     {
         .name = "overlay",
         .type = AVMEDIA_TYPE_VIDEO,
         .config_props = config_input_overlay,
-        .filter_frame = filter_frame_over,
+        .filter_frame = filter_frame,
     },
     { NULL }
 };
@@ -320,16 +320,10 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
 {
     PSNRContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame_main(&s->dinput, inlink, inpicref);
-}
-
-static int filter_frame_ref(AVFilterLink *inlink, AVFrame *inpicref)
-{
-    PSNRContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame_second(&s->dinput, inlink, inpicref);
+    return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
 }
 
 static int request_frame(AVFilterLink *outlink)
@@ -359,11 +353,11 @@ static const AVFilterPad psnr_inputs[] = {
     {
         .name = "main",
         .type = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame_main,
+        .filter_frame = filter_frame,
     },{
         .name = "reference",
         .type = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = filter_frame_ref,
+        .filter_frame = filter_frame,
         .config_props = config_input_ref,
     },
     { NULL }