@@ -67,7 +67,7 @@ OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
 OBJS-$(CONFIG_ASIDEDATA_FILTER) += f_sidedata.o
 OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
 OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
-OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o framesync2.o
+OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o framesync.o
 OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
 OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
 OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
@@ -137,7 +137,7 @@ OBJS-$(CONFIG_BENCH_FILTER) += f_bench.o
 OBJS-$(CONFIG_BITPLANENOISE_FILTER) += vf_bitplanenoise.o
 OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
 OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
-OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o framesync2.o
+OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o framesync.o
 OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o
 OBJS-$(CONFIG_BWDIF_FILTER) += vf_bwdif.o
 OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o
@@ -150,7 +150,7 @@ OBJS-$(CONFIG_COLORLEVELS_FILTER) += vf_colorlevels.o
 OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o
 OBJS-$(CONFIG_COLORSPACE_FILTER) += vf_colorspace.o colorspacedsp.o
 OBJS-$(CONFIG_CONVOLUTION_FILTER) += vf_convolution.o
-OBJS-$(CONFIG_CONVOLVE_FILTER) += vf_convolve.o framesync2.o
+OBJS-$(CONFIG_CONVOLVE_FILTER) += vf_convolve.o framesync.o
 OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o
 OBJS-$(CONFIG_COREIMAGE_FILTER) += vf_coreimage.o
 OBJS-$(CONFIG_COVER_RECT_FILTER) += vf_cover_rect.o lavfutils.o
@@ -171,7 +171,7 @@ OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o
 OBJS-$(CONFIG_DESPILL_FILTER) += vf_despill.o
 OBJS-$(CONFIG_DETELECINE_FILTER) += vf_detelecine.o
 OBJS-$(CONFIG_DILATION_FILTER) += vf_neighbor.o
-OBJS-$(CONFIG_DISPLACE_FILTER) += vf_displace.o framesync2.o
+OBJS-$(CONFIG_DISPLACE_FILTER) += vf_displace.o framesync.o
 OBJS-$(CONFIG_DOUBLEWEAVE_FILTER) += vf_weave.o
 OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o
 OBJS-$(CONFIG_DRAWGRAPH_FILTER) += f_drawgraph.o
@@ -200,19 +200,19 @@ OBJS-$(CONFIG_FSPP_FILTER) += vf_fspp.o
 OBJS-$(CONFIG_GBLUR_FILTER) += vf_gblur.o
 OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
 OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
-OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o framesync2.o
+OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o framesync.o
 OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
 OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
 OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
 OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
 OBJS-$(CONFIG_HQX_FILTER) += vf_hqx.o
-OBJS-$(CONFIG_HSTACK_FILTER) += vf_stack.o framesync2.o
+OBJS-$(CONFIG_HSTACK_FILTER) += vf_stack.o framesync.o
 OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
 OBJS-$(CONFIG_HWDOWNLOAD_FILTER) += vf_hwdownload.o
 OBJS-$(CONFIG_HWMAP_FILTER) += vf_hwmap.o
 OBJS-$(CONFIG_HWUPLOAD_CUDA_FILTER) += vf_hwupload_cuda.o
 OBJS-$(CONFIG_HWUPLOAD_FILTER) += vf_hwupload.o
-OBJS-$(CONFIG_HYSTERESIS_FILTER) += vf_hysteresis.o framesync2.o
+OBJS-$(CONFIG_HYSTERESIS_FILTER) += vf_hysteresis.o framesync.o
 OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
 OBJS-$(CONFIG_IL_FILTER) += vf_il.o
 OBJS-$(CONFIG_INFLATE_FILTER) += vf_neighbor.o
@@ -220,22 +220,22 @@ OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
 OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
 OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
 OBJS-$(CONFIG_LENSCORRECTION_FILTER) += vf_lenscorrection.o
-OBJS-$(CONFIG_LIBVMAF_FILTER) += vf_libvmaf.o framesync2.o
+OBJS-$(CONFIG_LIBVMAF_FILTER) += vf_libvmaf.o framesync.o
 OBJS-$(CONFIG_LIMITER_FILTER) += vf_limiter.o
 OBJS-$(CONFIG_LOOP_FILTER) += f_loop.o
 OBJS-$(CONFIG_LUMAKEY_FILTER) += vf_lumakey.o
 OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
-OBJS-$(CONFIG_LUT2_FILTER) += vf_lut2.o framesync2.o
+OBJS-$(CONFIG_LUT2_FILTER) += vf_lut2.o framesync.o
 OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
 OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
 OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
-OBJS-$(CONFIG_MASKEDCLAMP_FILTER) += vf_maskedclamp.o framesync2.o
-OBJS-$(CONFIG_MASKEDMERGE_FILTER) += vf_maskedmerge.o framesync2.o
+OBJS-$(CONFIG_MASKEDCLAMP_FILTER) += vf_maskedclamp.o framesync.o
+OBJS-$(CONFIG_MASKEDMERGE_FILTER) += vf_maskedmerge.o framesync.o
 OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
-OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync2.o
+OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
 OBJS-$(CONFIG_MESTIMATE_FILTER) += vf_mestimate.o motion_estimation.o
 OBJS-$(CONFIG_METADATA_FILTER) += f_metadata.o
-OBJS-$(CONFIG_MIDEQUALIZER_FILTER) += vf_midequalizer.o framesync2.o
+OBJS-$(CONFIG_MIDEQUALIZER_FILTER) += vf_midequalizer.o framesync.o
 OBJS-$(CONFIG_MINTERPOLATE_FILTER) += vf_minterpolate.o motion_estimation.o
 OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
 OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
@@ -248,11 +248,11 @@ OBJS-$(CONFIG_OCR_FILTER) += vf_ocr.o
 OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
 OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o
 OBJS-$(CONFIG_OSCILLOSCOPE_FILTER) += vf_datascope.o
-OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o framesync2.o
+OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o framesync.o
 OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
 OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
 OBJS-$(CONFIG_PALETTEGEN_FILTER) += vf_palettegen.o
-OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o framesync2.o
+OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o framesync.o
 OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
 OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
 OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
@@ -260,17 +260,17 @@ OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
 OBJS-$(CONFIG_PIXSCOPE_FILTER) += vf_datascope.o
 OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
 OBJS-$(CONFIG_PP7_FILTER) += vf_pp7.o
-OBJS-$(CONFIG_PREMULTIPLY_FILTER) += vf_premultiply.o framesync2.o
+OBJS-$(CONFIG_PREMULTIPLY_FILTER) += vf_premultiply.o framesync.o
 OBJS-$(CONFIG_PREWITT_FILTER) += vf_convolution.o
 OBJS-$(CONFIG_PSEUDOCOLOR_FILTER) += vf_pseudocolor.o
-OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o framesync2.o
+OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o framesync.o
 OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o
 OBJS-$(CONFIG_QP_FILTER) += vf_qp.o
 OBJS-$(CONFIG_RANDOM_FILTER) += vf_random.o
 OBJS-$(CONFIG_READEIA608_FILTER) += vf_readeia608.o
 OBJS-$(CONFIG_READVITC_FILTER) += vf_readvitc.o
 OBJS-$(CONFIG_REALTIME_FILTER) += f_realtime.o
-OBJS-$(CONFIG_REMAP_FILTER) += vf_remap.o framesync2.o
+OBJS-$(CONFIG_REMAP_FILTER) += vf_remap.o framesync.o
 OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += vf_removegrain.o
 OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
 OBJS-$(CONFIG_REPEATFIELDS_FILTER) += vf_repeatfields.o
@@ -304,24 +304,24 @@ OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
 OBJS-$(CONFIG_SOBEL_FILTER) += vf_convolution.o
 OBJS-$(CONFIG_SPLIT_FILTER) += split.o
 OBJS-$(CONFIG_SPP_FILTER) += vf_spp.o
-OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o framesync2.o
+OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o framesync.o
 OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
-OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o framesync2.o
+OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o framesync.o
 OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o
 OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o
 OBJS-$(CONFIG_SWAPRECT_FILTER) += vf_swaprect.o
 OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o
-OBJS-$(CONFIG_TBLEND_FILTER) += vf_blend.o framesync2.o
+OBJS-$(CONFIG_TBLEND_FILTER) += vf_blend.o framesync.o
 OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o
-OBJS-$(CONFIG_THRESHOLD_FILTER) += vf_threshold.o framesync2.o
+OBJS-$(CONFIG_THRESHOLD_FILTER) += vf_threshold.o framesync.o
 OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o
 OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o
 OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o
-OBJS-$(CONFIG_TLUT2_FILTER) += vf_lut2.o framesync2.o
+OBJS-$(CONFIG_TLUT2_FILTER) += vf_lut2.o framesync.o
 OBJS-$(CONFIG_TONEMAP_FILTER) += vf_tonemap.o
 OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
 OBJS-$(CONFIG_TRIM_FILTER) += trim.o
-OBJS-$(CONFIG_UNPREMULTIPLY_FILTER) += vf_premultiply.o framesync2.o
+OBJS-$(CONFIG_UNPREMULTIPLY_FILTER) += vf_premultiply.o framesync.o
 OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o
 OBJS-$(CONFIG_USPP_FILTER) += vf_uspp.o
 OBJS-$(CONFIG_VAGUEDENOISER_FILTER) += vf_vaguedenoiser.o
@@ -330,7 +330,7 @@ OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o
 OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o
 OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o
 OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o
-OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync2.o
+OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER) += vf_weave.o
@@ -22,7 +22,7 @@
 #include "avfilter.h"
 #include "audio.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -56,7 +56,7 @@ static int process_frame(FFFrameSync *fs)
     int i, j, ret = 0;
     for (i = 0; i < ctx->nb_inputs; i++) {
-        if ((ret = ff_framesync2_get_frame(&s->fs, i, &in[i], 0)) < 0)
+        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
             return ret;
     }
@@ -87,7 +87,7 @@ static int process_frame(FFFrameSync *fs)
 static int activate(AVFilterContext *ctx)
 {
     StreamSelectContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static int config_output(AVFilterLink *outlink)
@@ -124,7 +124,7 @@ static int config_output(AVFilterLink *outlink)
     if (s->fs.opaque == s)
         return 0;
-    if ((ret = ff_framesync2_init(&s->fs, ctx, ctx->nb_inputs)) < 0)
+    if ((ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs)) < 0)
         return ret;
     in = s->fs.in;
@@ -142,7 +142,7 @@ static int config_output(AVFilterLink *outlink)
     if (!s->frames)
         return AVERROR(ENOMEM);
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 }
 static int parse_definition(AVFilterContext *ctx, int nb_pads, int is_input, int is_audio)
@@ -289,7 +289,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     av_freep(&s->last_pts);
     av_freep(&s->map);
     av_freep(&s->frames);
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
 }
 static int query_formats(AVFilterContext *ctx)
@@ -22,7 +22,7 @@
 #include "libavutil/opt.h"
 #include "avfilter.h"
 #include "filters.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #define OFFSET(member) offsetof(FFFrameSync, member)
@@ -61,12 +61,12 @@ enum {
 static int consume_from_fifos(FFFrameSync *fs);
-const AVClass *framesync2_get_class(void)
+const AVClass *framesync_get_class(void)
 {
     return &framesync_class;
 }
-void ff_framesync2_preinit(FFFrameSync *fs)
+void ff_framesync_preinit(FFFrameSync *fs)
 {
     if (fs->class)
         return;
@@ -74,14 +74,14 @@ void ff_framesync2_preinit(FFFrameSync *fs)
     av_opt_set_defaults(fs);
 }
-int ff_framesync2_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
+int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
 {
     /* For filters with several outputs, we will not be able to assume which
        output is relevant for ff_outlink_frame_wanted() and
       ff_outlink_set_status(). To be designed when needed. */
     av_assert0(parent->nb_outputs == 1);
-    ff_framesync2_preinit(fs);
+    ff_framesync_preinit(fs);
     fs->parent = parent;
     fs->nb_in = nb_in;
@@ -114,7 +114,7 @@ static void framesync_sync_level_update(FFFrameSync *fs)
         framesync_eof(fs);
 }
-int ff_framesync2_configure(FFFrameSync *fs)
+int ff_framesync_configure(FFFrameSync *fs)
 {
     unsigned i;
     int64_t gcd, lcm;
@@ -253,7 +253,7 @@ static void framesync_inject_status(FFFrameSync *fs, unsigned in, int status, in
     fs->in[in].have_next = 1;
 }
-int ff_framesync2_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
+int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
                            unsigned get)
 {
     AVFrame *frame;
@@ -290,7 +290,7 @@ int ff_framesync2_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
     return 0;
 }
-void ff_framesync2_uninit(FFFrameSync *fs)
+void ff_framesync_uninit(FFFrameSync *fs)
 {
     unsigned i;
@@ -341,7 +341,7 @@ static int consume_from_fifos(FFFrameSync *fs)
     return 1;
 }
-int ff_framesync2_activate(FFFrameSync *fs)
+int ff_framesync_activate(FFFrameSync *fs)
 {
     int ret;
@@ -358,11 +358,11 @@ int ff_framesync2_activate(FFFrameSync *fs)
     return 0;
 }
-int ff_framesync2_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
+int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
 {
     int ret;
-    ret = ff_framesync2_init(fs, parent, 2);
+    ret = ff_framesync_init(fs, parent, 2);
     if (ret < 0)
         return ret;
     fs->in[0].time_base = parent->inputs[0]->time_base;
@@ -376,14 +376,14 @@ int ff_framesync2_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
     return 0;
 }
-int ff_framesync2_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
+int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
 {
     AVFilterContext *ctx = fs->parent;
     AVFrame *mainpic = NULL, *secondpic = NULL;
     int ret = 0;
-    if ((ret = ff_framesync2_get_frame(fs, 0, &mainpic, 1)) < 0 ||
-        (ret = ff_framesync2_get_frame(fs, 1, &secondpic, 0)) < 0) {
+    if ((ret = ff_framesync_get_frame(fs, 0, &mainpic, 1)) < 0 ||
+        (ret = ff_framesync_get_frame(fs, 1, &secondpic, 0)) < 0) {
         av_frame_free(&mainpic);
         return ret;
     }
@@ -398,11 +398,11 @@ int ff_framesync2_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
     return 0;
 }
-int ff_framesync2_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
+int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
 {
     int ret;
-    ret = ff_framesync2_dualinput_get(fs, f0, f1);
+    ret = ff_framesync_dualinput_get(fs, f0, f1);
     if (ret < 0)
         return ret;
     ret = ff_inlink_make_frame_writable(fs->parent->inputs[0], f0);
@@ -18,8 +18,8 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
-#ifndef AVFILTER_FRAMESYNC2_H
-#define AVFILTER_FRAMESYNC2_H
+#ifndef AVFILTER_FRAMESYNC_H
+#define AVFILTER_FRAMESYNC_H
 #include "bufferqueue.h"
@@ -47,7 +47,7 @@ enum EOFAction {
  * others can be configured.
  *
  * The basic working of this API is the following: set the on_event
- * callback, then call ff_framesync2_activate() from the filter's activate
+ * callback, then call ff_framesync_activate() from the filter's activate
  * callback.
  */
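The comment above summarizes the pattern that every filter touched by this rename follows: keep an FFFrameSync in the filter's private context, point its on_event callback at a frame-processing function, and forward the filter's activate callback to ff_framesync_activate(). A minimal sketch of that shape follows; ExampleContext, example_on_event and example_activate are illustrative names, not part of this patch, and a real filter would do its processing where the comments indicate.

#include "libavutil/mathematics.h"
#include "avfilter.h"
#include "framesync.h"
#include "internal.h"

typedef struct ExampleContext {
    const AVClass *class;
    FFFrameSync fs;              /* synchronizer owned by the filter's private context */
} ExampleContext;

/* on_event callback: invoked by the framesync core once every input
 * has a frame available for the current synchronization point */
static int example_on_event(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;   /* the owning filter */
    AVFrame *frame;
    int ret;

    /* take ownership of the frame on input 0 (get = 1); secondary
     * inputs would be read with get = 0 and combined here */
    if ((ret = ff_framesync_get_frame(fs, 0, &frame, 1)) < 0)
        return ret;
    if (!frame)
        return 0;                /* nothing available at this sync point */

    /* rescale the sync timestamp into the output time base,
     * as the filters in this patch do */
    frame->pts = av_rescale_q(fs->pts, fs->time_base,
                              ctx->outputs[0]->time_base);
    return ff_filter_frame(ctx->outputs[0], frame);
}

/* activate callback: ff_framesync_activate() is the whole implementation */
static int example_activate(AVFilterContext *ctx)
{
    ExampleContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}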
@@ -209,9 +209,9 @@ typedef struct FFFrameSync {
 } FFFrameSync;
 /**
- * Get the class for the framesync2 object.
+ * Get the class for the framesync object.
  */
-const AVClass *framesync2_get_class(void);
+const AVClass *framesync_get_class(void);
 /**
  * Pre-initialize a frame sync structure.
@@ -220,7 +220,7 @@ const AVClass *framesync2_get_class(void);
  * The entire structure is expected to be already set to 0.
  * This step is optional, but necessary to use the options.
  */
-void ff_framesync2_preinit(FFFrameSync *fs);
+void ff_framesync_preinit(FFFrameSync *fs);
 /**
  * Initialize a frame sync structure.
@@ -232,7 +232,7 @@ void ff_framesync2_preinit(FFFrameSync *fs);
  * @param nb_in number of inputs
  * @return >= 0 for success or a negative error code
  */
-int ff_framesync2_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in);
+int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in);
 /**
  * Configure a frame sync structure.
@@ -241,12 +241,12 @@ int ff_framesync2_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
  *
  * @return >= 0 for success or a negative error code
  */
-int ff_framesync2_configure(FFFrameSync *fs);
+int ff_framesync_configure(FFFrameSync *fs);
 /**
  * Free all memory currently allocated.
  */
-void ff_framesync2_uninit(FFFrameSync *fs);
+void ff_framesync_uninit(FFFrameSync *fs);
 /**
  * Get the current frame in an input.
@@ -258,16 +258,16 @@ void ff_framesync2_uninit(FFFrameSync *fs);
  *                the returned frame; the current frame will either be
  *                duplicated or removed from the framesync structure
  */
-int ff_framesync2_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
+int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
                            unsigned get);
 /**
  * Examine the frames in the filter's input and try to produce output.
  *
  * This function can be the complete implementation of the activate
- * method of a filter using framesync2.
+ * method of a filter using framesync.
  */
-int ff_framesync2_activate(FFFrameSync *fs);
+int ff_framesync_activate(FFFrameSync *fs);
 /**
  * Initialize a frame sync structure for dualinput.
@@ -277,35 +277,35 @@ int ff_framesync2_activate(FFFrameSync *fs);
  * the only one with sync set and generic timeline support will just pass it
  * unchanged when disabled.
  *
- * Equivalent to ff_framesync2_init(fs, parent, 2) then setting the time
+ * Equivalent to ff_framesync_init(fs, parent, 2) then setting the time
  * base, sync and ext modes on the inputs.
  */
-int ff_framesync2_init_dualinput(FFFrameSync *fs, AVFilterContext *parent);
+int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent);
 /**
  * @param f0 used to return the main frame
  * @param f1 used to return the second frame, or NULL if disabled
  * @return >=0 for success or AVERROR code
  */
-int ff_framesync2_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1);
+int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1);
 /**
- * Same as ff_framesync2_dualinput_get(), but make sure that f0 is writable.
+ * Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
  */
-int ff_framesync2_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1);
+int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1);
 #define FRAMESYNC_DEFINE_CLASS(name, context, field) \
 static int name##_framesync_preinit(AVFilterContext *ctx) { \
     context *s = ctx->priv; \
-    ff_framesync2_preinit(&s->field); \
+    ff_framesync_preinit(&s->field); \
     return 0; \
 } \
 static const AVClass *name##_child_class_next(const AVClass *prev) { \
-    return prev ? NULL : framesync2_get_class(); \
+    return prev ? NULL : framesync_get_class(); \
 } \
 static void *name##_child_next(void *obj, void *prev) { \
     context *s = obj; \
-    s->fs.class = framesync2_get_class(); /* FIXME */ \
+    s->fs.class = framesync_get_class(); /* FIXME */ \
     return prev ? NULL : &s->field; \
 } \
 static const AVClass name##_class = { \
@@ -318,4 +318,4 @@ static const AVClass name##_class = { \
     .child_next = name##_child_next, \
 }
-#endif /* AVFILTER_FRAMESYNC2_H */
+#endif /* AVFILTER_FRAMESYNC_H */
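The dual-input helpers declared above are what the two-input filters in this patch (blend, convolve, libvmaf, haldclut, overlay) switch to. Continuing the illustrative ExampleContext sketch from earlier, a two-input filter would typically wire them up as below; the function names and the elided processing are hypothetical, and ff_framesync_dualinput_get_writable() would replace ff_framesync_dualinput_get() when the main frame is modified in place.

/* on_event for a dual-input filter: 'second' may be NULL when the
 * secondary input is disabled or has already ended */
static int example_do_process(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *mainpic, *second;
    int ret;

    if ((ret = ff_framesync_dualinput_get(fs, &mainpic, &second)) < 0)
        return ret;
    if (second) {
        /* ... produce the output using mainpic and second here ... */
    }
    return ff_filter_frame(ctx->outputs[0], mainpic);
}

static int example_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ExampleContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret;

    /* two inputs, main + secondary, with time base and sync modes preset */
    if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
        return ret;
    s->fs.on_event = example_do_process;

    outlink->w         = mainlink->w;
    outlink->h         = mainlink->h;
    outlink->time_base = mainlink->time_base;

    return ff_framesync_configure(&s->fs);
}

static av_cold void example_uninit(AVFilterContext *ctx)
{
    ExampleContext *s = ctx->priv;
    ff_framesync_uninit(&s->fs);   /* drops any queued frames */
}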
@@ -22,7 +22,7 @@
 #define AVFILTER_MASKEDMERGE_H
 #include "avfilter.h"
-#include "framesync2.h"
+#include "framesync.h"
 typedef struct MaskedMergeContext {
     const AVClass *class;
@@ -25,7 +25,7 @@
 #include "avfilter.h"
 #include "bufferqueue.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
 #include "blend.h"
@@ -411,7 +411,7 @@ static int blend_frame_for_dualinput(FFFrameSync *fs)
     AVFrame *top_buf, *bottom_buf, *dst_buf;
     int ret;
-    ret = ff_framesync2_dualinput_get(fs, &top_buf, &bottom_buf);
+    ret = ff_framesync_dualinput_get(fs, &top_buf, &bottom_buf);
     if (ret < 0)
         return ret;
     if (!bottom_buf)
@@ -454,7 +454,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     BlendContext *s = ctx->priv;
     int i;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
     av_frame_free(&s->prev_frame);
     for (i = 0; i < FF_ARRAY_ELEMS(s->params); i++)
@@ -554,7 +554,7 @@ static int config_output(AVFilterLink *outlink)
     s->nb_planes = av_pix_fmt_count_planes(toplink->format);
     if (!s->tblend)
-        if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0)
+        if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
             return ret;
     for (plane = 0; plane < FF_ARRAY_ELEMS(s->params); plane++) {
@@ -581,7 +581,7 @@ static int config_output(AVFilterLink *outlink)
         }
     }
-    return s->tblend ? 0 : ff_framesync2_configure(&s->fs);
+    return s->tblend ? 0 : ff_framesync_configure(&s->fs);
 }
 #if CONFIG_BLEND_FILTER
@@ -589,7 +589,7 @@ static int config_output(AVFilterLink *outlink)
 static int activate(AVFilterContext *ctx)
 {
     BlendContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static const AVFilterPad blend_inputs[] = {
@@ -25,7 +25,7 @@
 #include "avfilter.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -262,7 +262,7 @@ static int do_convolve(FFFrameSync *fs)
     AVFrame *mainpic = NULL, *impulsepic = NULL;
     int ret, y, x, plane;
-    ret = ff_framesync2_dualinput_get(fs, &mainpic, &impulsepic);
+    ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic);
     if (ret < 0)
         return ret;
     if (!impulsepic)
@@ -336,7 +336,7 @@ static int config_output(AVFilterLink *outlink)
     int ret, i;
     s->fs.on_event = do_convolve;
-    ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+    ret = ff_framesync_init_dualinput(&s->fs, ctx);
     if (ret < 0)
         return ret;
     outlink->w = mainlink->w;
@@ -345,7 +345,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
     outlink->frame_rate = mainlink->frame_rate;
-    if ((ret = ff_framesync2_configure(&s->fs)) < 0)
+    if ((ret = ff_framesync_configure(&s->fs)) < 0)
         return ret;
     for (i = 0; i < s->nb_planes; i++) {
@@ -361,7 +361,7 @@ static int config_output(AVFilterLink *outlink)
 static int activate(AVFilterContext *ctx)
 {
     ConvolveContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
@@ -378,7 +378,7 @@ static av_cold void uninit(AVFilterContext *ctx)
         av_fft_end(s->ifft[i]);
     }
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
 }
 static const AVFilterPad convolve_inputs[] = {
@@ -23,7 +23,7 @@
 #include "libavutil/opt.h"
 #include "avfilter.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -248,9 +248,9 @@ static int process_frame(FFFrameSync *fs)
     AVFrame *out, *in, *xpic, *ypic;
     int ret;
-    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &in, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 2, &ypic, 0)) < 0)
+    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 2, &ypic, 0)) < 0)
         return ret;
     if (ctx->is_disabled) {
@@ -336,7 +336,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = srclink->sample_aspect_ratio;
     outlink->frame_rate = srclink->frame_rate;
-    ret = ff_framesync2_init(&s->fs, ctx, 3);
+    ret = ff_framesync_init(&s->fs, ctx, 3);
     if (ret < 0)
         return ret;
@@ -356,20 +356,20 @@ static int config_output(AVFilterLink *outlink)
     s->fs.opaque = s;
     s->fs.on_event = process_frame;
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
     DisplaceContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
 {
     DisplaceContext *s = ctx->priv;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
 }
 static const AVFilterPad displace_inputs[] = {
@@ -26,7 +26,7 @@
 #include "formats.h"
 #include "internal.h"
 #include "video.h"
-#include "framesync2.h"
+#include "framesync.h"
 #define OFFSET(x) offsetof(HysteresisContext, x)
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
@@ -94,8 +94,8 @@ static int process_frame(FFFrameSync *fs)
     AVFrame *out, *base, *alt;
     int ret;
-    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &base, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 1, &alt, 0)) < 0)
+    if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 1, &alt, 0)) < 0)
         return ret;
     if (ctx->is_disabled) {
@@ -324,7 +324,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = base->sample_aspect_ratio;
     outlink->frame_rate = base->frame_rate;
-    if ((ret = ff_framesync2_init(&s->fs, ctx, 2)) < 0)
+    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
         return ret;
     in = s->fs.in;
@@ -339,20 +339,20 @@ static int config_output(AVFilterLink *outlink)
     s->fs.opaque = s;
     s->fs.on_event = process_frame;
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
     HysteresisContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
 {
     HysteresisContext *s = ctx->priv;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
     av_freep(&s->map);
     av_freep(&s->xy);
 }
@@ -32,7 +32,7 @@
 #include "avfilter.h"
 #include "drawutils.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -177,7 +177,7 @@ static int do_vmaf(FFFrameSync *fs)
     AVFrame *main, *ref;
     int ret;
-    ret = ff_framesync2_dualinput_get(fs, &main, &ref);
+    ret = ff_framesync_dualinput_get(fs, &main, &ref);
     if (ret < 0)
         return ret;
     if (!ref)
@@ -266,7 +266,7 @@ static int config_output(AVFilterLink *outlink)
     AVFilterLink *mainlink = ctx->inputs[0];
     int ret;
-    ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+    ret = ff_framesync_init_dualinput(&s->fs, ctx);
     if (ret < 0)
         return ret;
     outlink->w = mainlink->w;
@@ -274,7 +274,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->time_base = mainlink->time_base;
     outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
     outlink->frame_rate = mainlink->frame_rate;
-    if ((ret = ff_framesync2_configure(&s->fs)) < 0)
+    if ((ret = ff_framesync_configure(&s->fs)) < 0)
         return ret;
     return 0;
@@ -283,14 +283,14 @@ static int config_output(AVFilterLink *outlink)
 static int activate(AVFilterContext *ctx)
 {
     LIBVMAFContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
 {
     LIBVMAFContext *s = ctx->priv;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
     pthread_mutex_lock(&s->lock);
     s->eof = 1;
@@ -28,7 +28,7 @@
 #include "formats.h"
 #include "internal.h"
 #include "video.h"
-#include "framesync2.h"
+#include "framesync.h"
 static const char *const var_names[] = {
     "w", ///< width of the input video
@@ -85,7 +85,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     LUT2Context *s = ctx->priv;
     int i;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
     av_frame_free(&s->prev_frame);
     for (i = 0; i < 4; i++) {
@@ -216,8 +216,8 @@ static int process_frame(FFFrameSync *fs)
     AVFrame *out, *srcx = NULL, *srcy = NULL;
     int ret;
-    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 1, &srcy, 0)) < 0)
+    if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;
     if (ctx->is_disabled || !srcy) {
@@ -327,7 +327,7 @@ static int lut2_config_output(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
     outlink->frame_rate = srcx->frame_rate;
-    if ((ret = ff_framesync2_init(&s->fs, ctx, 2)) < 0)
+    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
         return ret;
     in = s->fs.in;
@@ -345,13 +345,13 @@ static int lut2_config_output(AVFilterLink *outlink)
     if ((ret = config_output(outlink)) < 0)
         return ret;
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
     LUT2Context *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static const AVFilterPad inputs[] = {
@@ -32,7 +32,7 @@
 #include "avfilter.h"
 #include "drawutils.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -681,13 +681,13 @@ static int config_output(AVFilterLink *outlink)
     LUT3DContext *lut3d = ctx->priv;
     int ret;
-    ret = ff_framesync2_init_dualinput(&lut3d->fs, ctx);
+    ret = ff_framesync_init_dualinput(&lut3d->fs, ctx);
     if (ret < 0)
         return ret;
     outlink->w = ctx->inputs[0]->w;
     outlink->h = ctx->inputs[0]->h;
     outlink->time_base = ctx->inputs[0]->time_base;
-    if ((ret = ff_framesync2_configure(&lut3d->fs)) < 0)
+    if ((ret = ff_framesync_configure(&lut3d->fs)) < 0)
         return ret;
     return 0;
 }
@@ -695,7 +695,7 @@ static int config_output(AVFilterLink *outlink)
 static int activate(AVFilterContext *ctx)
 {
     LUT3DContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static int config_clut(AVFilterLink *inlink)
@@ -755,7 +755,7 @@ static int update_apply_clut(FFFrameSync *fs)
     AVFrame *main, *second, *out;
     int ret;
-    ret = ff_framesync2_dualinput_get(fs, &main, &second);
+    ret = ff_framesync_dualinput_get(fs, &main, &second);
     if (ret < 0)
         return ret;
     if (!second)
@@ -775,7 +775,7 @@ static av_cold int haldclut_init(AVFilterContext *ctx)
 static av_cold void haldclut_uninit(AVFilterContext *ctx)
 {
     LUT3DContext *lut3d = ctx->priv;
-    ff_framesync2_uninit(&lut3d->fs);
+    ff_framesync_uninit(&lut3d->fs);
 }
 static const AVOption haldclut_options[] = {
@@ -25,7 +25,7 @@
 #include "formats.h"
 #include "internal.h"
 #include "video.h"
-#include "framesync2.h"
+#include "framesync.h"
 #define OFFSET(x) offsetof(MaskedClampContext, x)
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
@@ -93,9 +93,9 @@ static int process_frame(FFFrameSync *fs)
     AVFrame *out, *base, *dark, *bright;
     int ret;
-    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &base, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 1, &dark, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 2, &bright, 0)) < 0)
+    if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 1, &dark, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 2, &bright, 0)) < 0)
         return ret;
     if (ctx->is_disabled) {
@@ -265,7 +265,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = base->sample_aspect_ratio;
     outlink->frame_rate = base->frame_rate;
-    if ((ret = ff_framesync2_init(&s->fs, ctx, 3)) < 0)
+    if ((ret = ff_framesync_init(&s->fs, ctx, 3)) < 0)
         return ret;
     in = s->fs.in;
@@ -284,20 +284,20 @@ static int config_output(AVFilterLink *outlink)
     s->fs.opaque = s;
     s->fs.on_event = process_frame;
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
     MaskedClampContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
 {
     MaskedClampContext *s = ctx->priv;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
 }
 static const AVFilterPad maskedclamp_inputs[] = {
@@ -71,9 +71,9 @@ static int process_frame(FFFrameSync *fs)
     AVFrame *out, *base, *overlay, *mask;
     int ret;
-    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &base, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 1, &overlay, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 2, &mask, 0)) < 0)
+    if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 1, &overlay, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 2, &mask, 0)) < 0)
         return ret;
     if (ctx->is_disabled) {
@@ -232,7 +232,7 @@ static int config_output(AVFilterLink *outlink)
     if ((ret = av_image_fill_linesizes(s->linesize, outlink->format, outlink->w)) < 0)
         return ret;
-    if ((ret = ff_framesync2_init(&s->fs, ctx, 3)) < 0)
+    if ((ret = ff_framesync_init(&s->fs, ctx, 3)) < 0)
         return ret;
     in = s->fs.in;
@@ -251,20 +251,20 @@ static int config_output(AVFilterLink *outlink)
     s->fs.opaque = s;
     s->fs.on_event = process_frame;
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
     MaskedMergeContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
 {
     MaskedMergeContext *s = ctx->priv;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
 }
 static const AVFilterPad maskedmerge_inputs[] = {
@@ -25,7 +25,7 @@
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
 #include "internal.h"
-#include "framesync2.h"
+#include "framesync.h"
 typedef struct InputParam {
     int depth[4];
@@ -143,7 +143,7 @@ static int process_frame(FFFrameSync *fs)
     int i, ret;
     for (i = 0; i < s->nb_inputs; i++) {
-        if ((ret = ff_framesync2_get_frame(&s->fs, i, &in[i], 0)) < 0)
+        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
             return ret;
     }
@@ -172,7 +172,7 @@ static int config_output(AVFilterLink *outlink)
     FFFrameSyncIn *in;
     int i, ret;
-    if ((ret = ff_framesync2_init(&s->fs, ctx, s->nb_inputs)) < 0)
+    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
         return ret;
     in = s->fs.in;
@@ -265,7 +265,7 @@ static int config_output(AVFilterLink *outlink)
         }
     }
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 fail:
     return AVERROR(EINVAL);
 }
@@ -273,7 +273,7 @@ fail:
 static int activate(AVFilterContext *ctx)
 {
     MergePlanesContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
@@ -281,7 +281,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     MergePlanesContext *s = ctx->priv;
     int i;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
     for (i = 0; i < ctx->nb_inputs; i++)
         av_freep(&ctx->input_pads[i].name);
@@ -25,7 +25,7 @@
 #include "formats.h"
 #include "internal.h"
 #include "video.h"
-#include "framesync2.h"
+#include "framesync.h"
 typedef struct MidEqualizerContext {
     const AVClass *class;
@@ -89,8 +89,8 @@ static int process_frame(FFFrameSync *fs)
     AVFrame *out, *in0, *in1;
     int ret;
-    if ((ret = ff_framesync2_get_frame(&s->fs, 0, &in0, 0)) < 0 ||
-        (ret = ff_framesync2_get_frame(&s->fs, 1, &in1, 0)) < 0)
+    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in0, 0)) < 0 ||
+        (ret = ff_framesync_get_frame(&s->fs, 1, &in1, 0)) < 0)
         return ret;
     if (ctx->is_disabled) {
@@ -311,7 +311,7 @@ static int config_output(AVFilterLink *outlink)
     outlink->sample_aspect_ratio = in0->sample_aspect_ratio;
     outlink->frame_rate = in0->frame_rate;
-    if ((ret = ff_framesync2_init(&s->fs, ctx, 2)) < 0)
+    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
         return ret;
     in = s->fs.in;
@@ -326,20 +326,20 @@ static int config_output(AVFilterLink *outlink)
     s->fs.opaque = s;
     s->fs.on_event = process_frame;
-    return ff_framesync2_configure(&s->fs);
+    return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
     MidEqualizerContext *s = ctx->priv;
-    return ff_framesync2_activate(&s->fs);
+    return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
 {
     MidEqualizerContext *s = ctx->priv;
-    ff_framesync2_uninit(&s->fs);
+    ff_framesync_uninit(&s->fs);
     av_freep(&s->histogram[0]);
     av_freep(&s->histogram[1]);
     av_freep(&s->cchange);
@@ -37,7 +37,7 @@
 #include "libavutil/timestamp.h"
 #include "internal.h"
 #include "drawutils.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "video.h"
 static const char *const var_names[] = {
@@ -130,7 +130,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 {
 OverlayContext *s = ctx->priv;
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 av_expr_free(s->x_pexpr); s->x_pexpr = NULL;
 av_expr_free(s->y_pexpr); s->y_pexpr = NULL;
 }
@@ -377,14 +377,14 @@ static int config_output(AVFilterLink *outlink)
 OverlayContext *s = ctx->priv;
 int ret;
-if ((ret = ff_framesync2_init_dualinput(&s->fs, ctx)) < 0)
+if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
 return ret;
 outlink->w = ctx->inputs[MAIN]->w;
 outlink->h = ctx->inputs[MAIN]->h;
 outlink->time_base = ctx->inputs[MAIN]->time_base;
-return ff_framesync2_configure(&s->fs);
+return ff_framesync_configure(&s->fs);
 }
 // divide by 255 and round to nearest
@@ -765,7 +765,7 @@ static int do_blend(FFFrameSync *fs)
 AVFilterLink *inlink = ctx->inputs[0];
 int ret;
-ret = ff_framesync2_dualinput_get_writable(fs, &mainpic, &second);
+ret = ff_framesync_dualinput_get_writable(fs, &mainpic, &second);
 if (ret < 0)
 return ret;
 if (!second)
@@ -808,7 +808,7 @@ static av_cold int init(AVFilterContext *ctx)
 static int activate(AVFilterContext *ctx)
 {
 OverlayContext *s = ctx->priv;
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
 #define OFFSET(x) offsetof(OverlayContext, x)
@@ -29,7 +29,7 @@
 #include "libavutil/qsort.h"
 #include "avfilter.h"
 #include "filters.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 enum dithering_mode {
@@ -904,7 +904,7 @@ static int config_output(AVFilterLink *outlink)
 AVFilterContext *ctx = outlink->src;
 PaletteUseContext *s = ctx->priv;
-ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+ret = ff_framesync_init_dualinput(&s->fs, ctx);
 if (ret < 0)
 return ret;
 s->fs.opt_repeatlast = 1; // only 1 frame in the palette
@@ -915,7 +915,7 @@ static int config_output(AVFilterLink *outlink)
 outlink->h = ctx->inputs[0]->h;
 outlink->time_base = ctx->inputs[0]->time_base;
-if ((ret = ff_framesync2_configure(&s->fs)) < 0)
+if ((ret = ff_framesync_configure(&s->fs)) < 0)
 return ret;
 return 0;
 }
@@ -971,7 +971,7 @@ static int load_apply_palette(FFFrameSync *fs)
 int ret;
 // writable for error diffusal dithering
-ret = ff_framesync2_dualinput_get_writable(fs, &main, &second);
+ret = ff_framesync_dualinput_get_writable(fs, &main, &second);
 if (ret < 0)
 return ret;
 if (!main || !second) {
@@ -1052,7 +1052,7 @@ static av_cold int init(AVFilterContext *ctx)
 static int activate(AVFilterContext *ctx)
 {
 PaletteUseContext *s = ctx->priv;
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
@@ -1060,7 +1060,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 int i;
 PaletteUseContext *s = ctx->priv;
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 for (i = 0; i < CACHE_SIZE; i++)
 av_freep(&s->cache[i].entries);
 av_frame_free(&s->last_in);
@@ -24,7 +24,7 @@
 #include "avfilter.h"
 #include "filters.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -503,8 +503,8 @@ static int process_frame(FFFrameSync *fs)
 AVFrame *out = NULL, *base, *alpha;
 int ret;
-if ((ret = ff_framesync2_get_frame(&s->fs, 0, &base, 0)) < 0 ||
-(ret = ff_framesync2_get_frame(&s->fs, 1, &alpha, 0)) < 0)
+if ((ret = ff_framesync_get_frame(&s->fs, 0, &base, 0)) < 0 ||
+(ret = ff_framesync_get_frame(&s->fs, 1, &alpha, 0)) < 0)
 return ret;
 if ((ret = filter_frame(ctx, &out, base, alpha)) < 0)
@@ -578,7 +578,7 @@ static int config_output(AVFilterLink *outlink)
 if (s->inplace)
 return 0;
-if ((ret = ff_framesync2_init(&s->fs, ctx, 2)) < 0)
+if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
 return ret;
 in = s->fs.in;
@@ -593,7 +593,7 @@ static int config_output(AVFilterLink *outlink)
 s->fs.opaque = s;
 s->fs.on_event = process_frame;
-return ff_framesync2_configure(&s->fs);
+return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
@@ -623,7 +623,7 @@ static int activate(AVFilterContext *ctx)
 return 0;
 }
 } else {
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
 }
@@ -668,7 +668,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 PreMultiplyContext *s = ctx->priv;
 if (!s->inplace)
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 }
 static const AVFilterPad premultiply_outputs[] = {
@@ -31,7 +31,7 @@
 #include "avfilter.h"
 #include "drawutils.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "psnr.h"
 #include "video.h"
@@ -151,7 +151,7 @@ static int do_psnr(FFFrameSync *fs)
 int ret, j, c;
 AVDictionary **metadata;
-ret = ff_framesync2_dualinput_get(fs, &main, &ref);
+ret = ff_framesync_dualinput_get(fs, &main, &ref);
 if (ret < 0)
 return ret;
 if (!ref)
@@ -339,7 +339,7 @@ static int config_output(AVFilterLink *outlink)
 AVFilterLink *mainlink = ctx->inputs[0];
 int ret;
-ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+ret = ff_framesync_init_dualinput(&s->fs, ctx);
 if (ret < 0)
 return ret;
 outlink->w = mainlink->w;
@@ -347,7 +347,7 @@ static int config_output(AVFilterLink *outlink)
 outlink->time_base = mainlink->time_base;
 outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
 outlink->frame_rate = mainlink->frame_rate;
-if ((ret = ff_framesync2_configure(&s->fs)) < 0)
+if ((ret = ff_framesync_configure(&s->fs)) < 0)
 return ret;
 return 0;
@@ -356,7 +356,7 @@ static int config_output(AVFilterLink *outlink)
 static int activate(AVFilterContext *ctx)
 {
 PSNRContext *s = ctx->priv;
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
@@ -380,7 +380,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 get_psnr(s->min_mse, 1, s->average_max));
 }
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 if (s->stats_file && s->stats_file != stdout)
 fclose(s->stats_file);
@@ -41,7 +41,7 @@
 #include "libavutil/opt.h"
 #include "avfilter.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -286,9 +286,9 @@ static int process_frame(FFFrameSync *fs)
 AVFrame *out, *in, *xpic, *ypic;
 int ret;
-if ((ret = ff_framesync2_get_frame(&s->fs, 0, &in, 0)) < 0 ||
-(ret = ff_framesync2_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
-(ret = ff_framesync2_get_frame(&s->fs, 2, &ypic, 0)) < 0)
+if ((ret = ff_framesync_get_frame(&s->fs, 0, &in, 0)) < 0 ||
+(ret = ff_framesync_get_frame(&s->fs, 1, &xpic, 0)) < 0 ||
+(ret = ff_framesync_get_frame(&s->fs, 2, &ypic, 0)) < 0)
 return ret;
 if (ctx->is_disabled) {
@@ -333,7 +333,7 @@ static int config_output(AVFilterLink *outlink)
 outlink->sample_aspect_ratio = srclink->sample_aspect_ratio;
 outlink->frame_rate = srclink->frame_rate;
-ret = ff_framesync2_init(&s->fs, ctx, 3);
+ret = ff_framesync_init(&s->fs, ctx, 3);
 if (ret < 0)
 return ret;
@@ -353,13 +353,13 @@ static int config_output(AVFilterLink *outlink)
 s->fs.opaque = s;
 s->fs.on_event = process_frame;
-return ff_framesync2_configure(&s->fs);
+return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
 RemapContext *s = ctx->priv;
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
@@ -367,7 +367,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 {
 RemapContext *s = ctx->priv;
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 }
 static const AVFilterPad remap_inputs[] = {
@@ -40,7 +40,7 @@
 #include "avfilter.h"
 #include "drawutils.h"
 #include "formats.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "ssim.h"
 #include "video.h"
@@ -291,7 +291,7 @@ static int do_ssim(FFFrameSync *fs)
 float c[4], ssimv = 0.0;
 int ret, i;
-ret = ff_framesync2_dualinput_get(fs, &main, &ref);
+ret = ff_framesync_dualinput_get(fs, &main, &ref);
 if (ret < 0)
 return ret;
 if (!ref)
@@ -431,7 +431,7 @@ static int config_output(AVFilterLink *outlink)
 AVFilterLink *mainlink = ctx->inputs[0];
 int ret;
-ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+ret = ff_framesync_init_dualinput(&s->fs, ctx);
 if (ret < 0)
 return ret;
 outlink->w = mainlink->w;
@@ -440,7 +440,7 @@ static int config_output(AVFilterLink *outlink)
 outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
 outlink->frame_rate = mainlink->frame_rate;
-if ((ret = ff_framesync2_configure(&s->fs)) < 0)
+if ((ret = ff_framesync_configure(&s->fs)) < 0)
 return ret;
 return 0;
@@ -449,7 +449,7 @@ static int config_output(AVFilterLink *outlink)
 static int activate(AVFilterContext *ctx)
 {
 SSIMContext *s = ctx->priv;
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
@@ -469,7 +469,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 s->ssim_total / s->nb_frames, ssim_db(s->ssim_total, s->nb_frames));
 }
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 if (s->stats_file && s->stats_file != stdout)
 fclose(s->stats_file);
@@ -26,7 +26,7 @@
 #include "avfilter.h"
 #include "formats.h"
 #include "internal.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "video.h"
 typedef struct StackContext {
@@ -97,7 +97,7 @@ static int process_frame(FFFrameSync *fs)
 int i, p, ret, offset[4] = { 0 };
 for (i = 0; i < s->nb_inputs; i++) {
-if ((ret = ff_framesync2_get_frame(&s->fs, i, &in[i], 0)) < 0)
+if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
 return ret;
 }
@@ -180,7 +180,7 @@ static int config_output(AVFilterLink *outlink)
 outlink->time_base = time_base;
 outlink->frame_rate = frame_rate;
-if ((ret = ff_framesync2_init(&s->fs, ctx, s->nb_inputs)) < 0)
+if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
 return ret;
 in = s->fs.in;
@@ -196,7 +196,7 @@ static int config_output(AVFilterLink *outlink)
 in[i].after = s->shortest ? EXT_STOP : EXT_INFINITY;
 }
-return ff_framesync2_configure(&s->fs);
+return ff_framesync_configure(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
@@ -204,7 +204,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 StackContext *s = ctx->priv;
 int i;
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 av_freep(&s->frames);
 for (i = 0; i < ctx->nb_inputs; i++)
@@ -214,7 +214,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 static int activate(AVFilterContext *ctx)
 {
 StackContext *s = ctx->priv;
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
 #define OFFSET(x) offsetof(StackContext, x)
@@ -28,7 +28,7 @@
 #include "libavutil/opt.h"
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
-#include "framesync2.h"
+#include "framesync.h"
 #include "internal.h"
 #include "video.h"
@@ -96,10 +96,10 @@ static int process_frame(FFFrameSync *fs)
 AVFrame *out, *in, *threshold, *min, *max;
 int ret;
-if ((ret = ff_framesync2_get_frame(&s->fs, 0, &in, 0)) < 0 ||
-(ret = ff_framesync2_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
-(ret = ff_framesync2_get_frame(&s->fs, 2, &min, 0)) < 0 ||
-(ret = ff_framesync2_get_frame(&s->fs, 3, &max, 0)) < 0)
+if ((ret = ff_framesync_get_frame(&s->fs, 0, &in, 0)) < 0 ||
+(ret = ff_framesync_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
+(ret = ff_framesync_get_frame(&s->fs, 2, &min, 0)) < 0 ||
+(ret = ff_framesync_get_frame(&s->fs, 3, &max, 0)) < 0)
 return ret;
 if (ctx->is_disabled) {
@@ -256,7 +256,7 @@ static int config_output(AVFilterLink *outlink)
 outlink->sample_aspect_ratio = base->sample_aspect_ratio;
 outlink->frame_rate = base->frame_rate;
-if ((ret = ff_framesync2_init(&s->fs, ctx, 4)) < 0)
+if ((ret = ff_framesync_init(&s->fs, ctx, 4)) < 0)
 return ret;
 in = s->fs.in;
@@ -279,20 +279,20 @@ static int config_output(AVFilterLink *outlink)
 s->fs.opaque = s;
 s->fs.on_event = process_frame;
-return ff_framesync2_configure(&s->fs);
+return ff_framesync_configure(&s->fs);
 }
 static int activate(AVFilterContext *ctx)
 {
 ThresholdContext *s = ctx->priv;
-return ff_framesync2_activate(&s->fs);
+return ff_framesync_activate(&s->fs);
 }
 static av_cold void uninit(AVFilterContext *ctx)
 {
 ThresholdContext *s = ctx->priv;
-ff_framesync2_uninit(&s->fs);
+ff_framesync_uninit(&s->fs);
 }
 static const AVFilterPad inputs[] = {