--- a/libavfilter/vf_vidstabdetect.c
+++ b/libavfilter/vf_vidstabdetect.c
@@ -31,13 +31,13 @@
 #include "vidstabutils.h"
 typedef struct {
-    const AVClass* class;
+    const AVClass *class;
     VSMotionDetect md;
     VSMotionDetectConfig conf;
-    char* result;
-    FILE* f;
+    char *result;
+    FILE *f;
 } StabData;
@@ -45,24 +45,24 @@ typedef struct {
 #define OFFSETC(x) (offsetof(StabData, conf)+offsetof(VSMotionDetectConfig, x))
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption vidstabdetect_options[]= {
-    {"result", "path to the file used to write the transforms (def:transforms.trf)", OFFSET(result), AV_OPT_TYPE_STRING, {.str = DEFAULT_RESULT_NAME}},
+static const AVOption vidstabdetect_options[] = {
+    {"result", "path to the file used to write the transforms (def:transforms.trf)", OFFSET(result), AV_OPT_TYPE_STRING, {.str = DEFAULT_RESULT_NAME}},
     {"shakiness", "how shaky is the video and how quick is the camera?"
-     " 1: little (fast) 10: very strong/quick (slow) (def: 5)", OFFSETC(shakiness), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 10, FLAGS},
-    {"accuracy", "(>=shakiness) 1: low 15: high (slow) (def: 9)", OFFSETC(accuracy), AV_OPT_TYPE_INT, {.i64 = 9 }, 1, 15, FLAGS},
-    {"stepsize", "region around minimum is scanned with 1 pixel resolution (def: 6)", OFFSETC(stepSize), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 32, FLAGS},
-    {"mincontrast", "below this contrast a field is discarded (0-1) (def: 0.3)", OFFSETC(contrastThreshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0.0, 1.0, FLAGS},
-    {"show", "0: draw nothing (def); 1,2: show fields and transforms", OFFSETC(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS},
+     " 1: little (fast) 10: very strong/quick (slow) (def: 5)", OFFSETC(shakiness), AV_OPT_TYPE_INT, {.i64 = 5}, 1, 10, FLAGS},
+    {"accuracy", "(>=shakiness) 1: low 15: high (slow) (def: 9)", OFFSETC(accuracy), AV_OPT_TYPE_INT, {.i64 = 9}, 1, 15, FLAGS},
+    {"stepsize", "region around minimum is scanned with 1 pixel resolution (def: 6)", OFFSETC(stepSize), AV_OPT_TYPE_INT, {.i64 = 6}, 1, 32, FLAGS},
+    {"mincontrast", "below this contrast a field is discarded (0-1) (def: 0.3)", OFFSETC(contrastThreshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.25}, 0.0, 1.0, FLAGS},
+    {"show", "0: draw nothing (def); 1,2: show fields and transforms", OFFSETC(show), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 2, FLAGS},
     {"tripod", "virtual tripod mode (if >0): motion is compared to a reference"
-     " reference frame (frame # is the value) (def: 0)", OFFSETC(virtualTripod), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS},
-    {NULL},
+     " reference frame (frame # is the value) (def: 0)", OFFSETC(virtualTripod), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS},
+    {NULL}
 };
 AVFILTER_DEFINE_CLASS(vidstabdetect);
 static av_cold int init(AVFilterContext *ctx)
 {
-    StabData* sd = ctx->priv;
+    StabData *sd = ctx->priv;
     vs_set_mem_and_log_functions();
     sd->class = &vidstabdetect_class;
     av_log(ctx, AV_LOG_VERBOSE, "vidstabdetect filter: init %s\n", LIBVIDSTAB_VERSION);
@@ -72,7 +72,7 @@ static av_cold int init(AVFilterContext *ctx)
 static av_cold void uninit(AVFilterContext *ctx)
 {
     StabData *sd = ctx->priv;
-    VSMotionDetect* md = &(sd->md);
+    VSMotionDetect *md = &(sd->md);
     if (sd->f) {
         fclose(sd->f);
@@ -80,7 +80,6 @@ static av_cold void uninit(AVFilterContext *ctx)
     }
     vsMotionDetectionCleanup(md);
 }
 static int query_formats(AVFilterContext *ctx)
@@ -98,7 +97,6 @@ static int query_formats(AVFilterContext *ctx)
     return 0;
 }
 static int config_input(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
@@ -108,25 +106,25 @@ static int config_input(AVFilterLink *inlink)
     VSFrameInfo fi;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
-    vsFrameInfoInit(&fi,inlink->w, inlink->h, av_2_vs_pixel_format(ctx, inlink->format));
-    if(fi.bytesPerPixel != av_get_bits_per_pixel(desc)/8){
+    vsFrameInfoInit(&fi, inlink->w, inlink->h, av_2_vs_pixel_format(ctx, inlink->format));
+    if (fi.bytesPerPixel != av_get_bits_per_pixel(desc)/8) {
         av_log(ctx, AV_LOG_ERROR, "pixel-format error: wrong bits/per/pixel, please report a BUG");
         return AVERROR(EINVAL);
     }
-    if(fi.log2ChromaW != desc->log2_chroma_w){
+    if (fi.log2ChromaW != desc->log2_chroma_w) {
         av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_w, please report a BUG");
         return AVERROR(EINVAL);
     }
-    if(fi.log2ChromaH != desc->log2_chroma_h){
+    if (fi.log2ChromaH != desc->log2_chroma_h) {
         av_log(ctx, AV_LOG_ERROR, "pixel-format error: log2_chroma_h, please report a BUG");
         return AVERROR(EINVAL);
     }
-    // set values that are not initializes by the options
+    // set values that are not initialized by the options
     sd->conf.algo = 1;
     sd->conf.modName = "vidstabdetect";
-    if(vsMotionDetectInit(md, &sd->conf, &fi) != VS_OK){
+    if (vsMotionDetectInit(md, &sd->conf, &fi) != VS_OK) {
         av_log(ctx, AV_LOG_ERROR, "initialization of Motion Detection failed, please report a BUG");
         return AVERROR(EINVAL);
     }
@@ -144,8 +142,8 @@ static int config_input(AVFilterLink *inlink)
     if (sd->f == NULL) {
         av_log(ctx, AV_LOG_ERROR, "cannot open transform file %s\n", sd->result);
         return AVERROR(EINVAL);
-    }else{
-        if(vsPrepareFile(md, sd->f) != VS_OK){
+    } else {
+        if (vsPrepareFile(md, sd->f) != VS_OK) {
             av_log(ctx, AV_LOG_ERROR, "cannot write to transform file %s\n", sd->result);
             return AVERROR(EINVAL);
         }
@@ -158,7 +156,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx = inlink->dst;
     StabData *sd = ctx->priv;
-    VSMotionDetect* md = &(sd->md);
+    VSMotionDetect *md = &(sd->md);
     LocalMotions localmotions;
     AVFilterLink *outlink = inlink->dst->outputs[0];
@@ -179,21 +177,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         av_frame_copy_props(out, in);
     }
-    for(plane=0; plane < md->fi.planes; plane++){
+    for (plane = 0; plane < md->fi.planes; plane++) {
         frame.data[plane] = in->data[plane];
         frame.linesize[plane] = in->linesize[plane];
     }
-    if(vsMotionDetection(md, &localmotions, &frame) != VS_OK){
+    if (vsMotionDetection(md, &localmotions, &frame) != VS_OK) {
         av_log(ctx, AV_LOG_ERROR, "motion detection failed");
         return AVERROR(AVERROR_EXTERNAL);
     } else {
-        if(vsWriteToFile(md, sd->f, &localmotions) != VS_OK){
+        if (vsWriteToFile(md, sd->f, &localmotions) != VS_OK) {
            av_log(ctx, AV_LOG_ERROR, "cannot write to transform file");
            return AVERROR(errno);
        }
        vs_vector_del(&localmotions);
     }
-    if(sd->conf.show>0 && !direct){
+    if (sd->conf.show > 0 && !direct) {
         av_image_copy(out->data, out->linesize,
                       (void*)in->data, in->linesize,
                       in->format, in->width, in->height);
--- a/libavfilter/vf_vidstabtransform.c
+++ b/libavfilter/vf_vidstabtransform.c
@@ -31,13 +31,13 @@
 #include "vidstabutils.h"
 typedef struct {
-    const AVClass* class;
+    const AVClass *class;
     VSTransformData td;
     VSTransformConfig conf;
-    VSTransformations trans; // transformations
-    char* input; // name of transform file
+    VSTransformations trans; // transformations
+    char *input; // name of transform file
     int tripod;
 } TransformContext;
@@ -45,7 +45,7 @@ typedef struct {
 #define OFFSETC(x) (offsetof(TransformContext, conf)+offsetof(VSTransformConfig, x))
 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
-static const AVOption vidstabtransform_options[]= {
+static const AVOption vidstabtransform_options[] = {
     {"input", "path to the file storing the transforms (def:transforms.trf)", OFFSET(input),
      AV_OPT_TYPE_STRING, {.str = DEFAULT_INPUT_NAME} },
     {"smoothing", "number of frames*2 + 1 used for lowpass filtering (def: 10)", OFFSETC(smoothing),
@@ -62,7 +62,7 @@ static const AVOption vidstabtransform_options[]= {
      AV_OPT_TYPE_CONST, {.i64 = VSCropBorder }, 0, 0, FLAGS, "crop"},
     {"invert", "1: invert transforms (def: 0)", OFFSETC(invert),
      AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
-    {"relative", "consider transforms as 0: abslute, 1: relative (def)", OFFSETC(relative),
+    {"relative", "consider transforms as 0: absolute, 1: relative (def)", OFFSETC(relative),
      AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS},
     {"zoom", "percentage to zoom >0: zoom in, <0 zoom out (def: 0)", OFFSETC(zoom),
      AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, 100, FLAGS},
@@ -80,14 +80,14 @@ static const AVOption vidstabtransform_options[]= {
      AV_OPT_TYPE_CONST, {.i64 = VS_BiCubic },0, 0, FLAGS, "interpol"},
     {"tripod", "if 1: virtual tripod mode (equiv. to relative=0:smoothing=0)", OFFSET(tripod),
      AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS},
-    {NULL},
+    {NULL}
 };
 AVFILTER_DEFINE_CLASS(vidstabtransform);
 static av_cold int init(AVFilterContext *ctx)
 {
-    TransformContext* tc = ctx->priv;
+    TransformContext *tc = ctx->priv;
     vs_set_mem_and_log_functions();
     tc->class = &vidstabtransform_class;
     av_log(ctx, AV_LOG_VERBOSE, "vidstabtransform filter: init %s\n", LIBVIDSTAB_VERSION);
@@ -122,27 +122,27 @@ static int config_input(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     TransformContext *tc = ctx->priv;
-    FILE* f;
+    FILE *f;
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
-    VSTransformData* td = &(tc->td);
+    VSTransformData *td = &(tc->td);
     VSFrameInfo fi_src;
     VSFrameInfo fi_dest;
-    if(!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
-                        av_2_vs_pixel_format(ctx,inlink->format)) ||
-       !vsFrameInfoInit(&fi_dest, inlink->w, inlink->h,
-                        av_2_vs_pixel_format(ctx, inlink->format))){
+    if (!vsFrameInfoInit(&fi_src, inlink->w, inlink->h,
+                         av_2_vs_pixel_format(ctx, inlink->format)) ||
+        !vsFrameInfoInit(&fi_dest, inlink->w, inlink->h,
+                         av_2_vs_pixel_format(ctx, inlink->format))) {
         av_log(ctx, AV_LOG_ERROR, "unknown pixel format: %i (%s)",
                inlink->format, desc->name);
         return AVERROR(EINVAL);
     }
-    if(fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
-       fi_src.log2ChromaW != desc->log2_chroma_w ||
-       fi_src.log2ChromaH != desc->log2_chroma_h){
+    if (fi_src.bytesPerPixel != av_get_bits_per_pixel(desc)/8 ||
+        fi_src.log2ChromaW != desc->log2_chroma_w ||
+        fi_src.log2ChromaH != desc->log2_chroma_h) {
         av_log(ctx, AV_LOG_ERROR, "pixel-format error: bpp %i<>%i ",
                fi_src.bytesPerPixel, av_get_bits_per_pixel(desc)/8);
         av_log(ctx, AV_LOG_ERROR, "chroma_subsampl: w: %i<>%i h: %i<>%i\n",
@@ -154,18 +154,18 @@ static int config_input(AVFilterLink *inlink)
     // set values that are not initializes by the options
     tc->conf.modName = "vidstabtransform";
     tc->conf.verbose =1;
-    if(tc->tripod){
+    if (tc->tripod) {
         av_log(ctx, AV_LOG_INFO, "Virtual tripod mode: relative=0, smoothing=0");
-        tc->conf.relative=0;
-        tc->conf.smoothing=0;
+        tc->conf.relative = 0;
+        tc->conf.smoothing = 0;
     }
-    if(vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK){
+    if (vsTransformDataInit(td, &tc->conf, &fi_src, &fi_dest) != VS_OK) {
         av_log(ctx, AV_LOG_ERROR, "initialization of vid.stab transform failed, please report a BUG\n");
         return AVERROR(EINVAL);
     }
-    vsTransformGetConfig(&tc->conf,td);
+    vsTransformGetConfig(&tc->conf, td);
     av_log(ctx, AV_LOG_INFO, "Video transformation/stabilization settings (pass 2/2):\n");
     av_log(ctx, AV_LOG_INFO, " input = %s\n", tc->input);
     av_log(ctx, AV_LOG_INFO, " smoothing = %d\n", tc->conf.smoothing);
@@ -184,13 +184,13 @@ static int config_input(AVFilterLink *inlink)
         return AVERROR(errno);
     } else {
         VSManyLocalMotions mlms;
-        if(vsReadLocalMotionsFile(f,&mlms)==VS_OK){
-            // calculate the actual transforms from the localmotions
-            if(vsLocalmotions2TransformsSimple(td, &mlms,&tc->trans)!=VS_OK){
+        if (vsReadLocalMotionsFile(f, &mlms) == VS_OK) {
+            // calculate the actual transforms from the local motions
+            if (vsLocalmotions2TransformsSimple(td, &mlms, &tc->trans) != VS_OK) {
                 av_log(ctx, AV_LOG_ERROR, "calculating transformations failed\n");
                 return AVERROR(EINVAL);
             }
-        }else{ // try to read old format
+        } else { // try to read old format
             if (!vsReadOldTransforms(td, f, &tc->trans)) { /* read input file */
                 av_log(ctx, AV_LOG_ERROR, "error parsing input file %s\n", tc->input);
                 return AVERROR(EINVAL);
@@ -199,7 +199,7 @@ static int config_input(AVFilterLink *inlink)
     }
     fclose(f);
-    if (vsPreprocessTransforms(td, &tc->trans)!= VS_OK ) {
+    if (vsPreprocessTransforms(td, &tc->trans) != VS_OK ) {
         av_log(ctx, AV_LOG_ERROR, "error while preprocessing transforms\n");
         return AVERROR(EINVAL);
     }
@@ -209,7 +209,7 @@ static int config_input(AVFilterLink *inlink)
 }
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext *ctx = inlink->dst;
     TransformContext *tc = ctx->priv;
@@ -233,15 +233,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         av_frame_copy_props(out, in);
     }
-    for(plane=0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++){
+    for (plane = 0; plane < vsTransformGetSrcFrameInfo(td)->planes; plane++) {
         inframe.data[plane] = in->data[plane];
         inframe.linesize[plane] = in->linesize[plane];
     }
-    if(out == in){ // inplace
+    if (out == in) { // inplace
         vsTransformPrepare(td, &inframe, &inframe);
-    }else{ // seperate frames
+    } else { // separate frames
         VSFrame outframe;
-        for(plane=0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++){
+        for (plane = 0; plane < vsTransformGetDestFrameInfo(td)->planes; plane++) {
            outframe.data[plane] = out->data[plane];
            outframe.linesize[plane] = out->linesize[plane];
        }
@@ -289,6 +289,4 @@ AVFilter avfilter_vf_vidstabtransform = {
     .inputs = avfilter_vf_vidstabtransform_inputs,
     .outputs = avfilter_vf_vidstabtransform_outputs,
     .priv_class = &vidstabtransform_class,
 };
--- a/libavfilter/vidstabutils.c
+++ b/libavfilter/vidstabutils.c
@@ -20,10 +20,10 @@
 #include "vidstabutils.h"
 /** convert AV's pixelformat to vid.stab pixelformat */
-VSPixelFormat av_2_vs_pixel_format(AVFilterContext *ctx, enum AVPixelFormat pf){
-    switch(pf){
+VSPixelFormat av_2_vs_pixel_format(AVFilterContext *ctx, enum AVPixelFormat pf)
+{
+    switch (pf) {
     case AV_PIX_FMT_YUV420P: return PF_YUV420P;
     case AV_PIX_FMT_YUV422P: return PF_YUV422P;
     case AV_PIX_FMT_YUV444P: return PF_YUV444P;
@@ -41,14 +41,14 @@ VSPixelFormat av_2_vs_pixel_format(AVFilterContext *ctx, enum AVPixelFormat pf){
     }
 }
 /** struct to hold a valid context for logging from within vid.stab lib */
 typedef struct {
-    const AVClass* class;
+    const AVClass *class;
 } VS2AVLogCtx;
 /** wrapper to log vs_log into av_log */
-static int vs_2_av_log_wrapper(int type, const char* tag, const char* format, ...){
+static int vs_2_av_log_wrapper(int type, const char *tag, const char *format, ...)
+{
     va_list ap;
     VS2AVLogCtx ctx;
     AVClass class = {
@@ -59,14 +59,15 @@ static int vs_2_av_log_wrapper(int type, const char* tag, const char* format, ..
         .category = AV_CLASS_CATEGORY_FILTER,
     };
     ctx.class = &class;
-    va_start (ap, format);
+    va_start(ap, format);
     av_vlog(&ctx, type, format, ap);
-    va_end (ap);
+    va_end(ap);
     return VS_OK;
 }
 /** sets the memory allocation function and logging constants to av versions */
-void vs_set_mem_and_log_functions(void){
+void vs_set_mem_and_log_functions(void)
+{
     vs_malloc = av_malloc;
     vs_zalloc = av_mallocz;
     vs_realloc = av_realloc;