Currently, the model outputs the rain layer, so the filter's C code needs a subtraction to obtain the final derained result. I've sent a PR to update the model file, and it has been accepted; see https://github.com/XueweiMeng/derain_filter/pull/3

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
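For clarity, here is a minimal standalone sketch (illustrative names, not the actual FFmpeg code) of the per-pixel conversion before and after the model update. Before, the network predicted the rain layer and the filter subtracted it from the input; after, the network outputs the derained pixel directly:

#include <stdint.h>

#define CLIP(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

/* Old model: the network predicted the rain layer, so the filter had
 * to subtract it from the input pixel to recover the clean image. */
static uint8_t derain_pixel_old(float input_val, float rain_val)
{
    return CLIP((int)((input_val - rain_val) * 255), 0, 255);
}

/* New model: the network outputs the derained pixel directly, so the
 * filter only rescales the float value in [0,1] to 8 bits. */
static uint8_t derain_pixel_new(float output_val)
{
    return CLIP((int)(output_val * 255), 0, 255);
}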
diff --git a/doc/filters.texi b/doc/filters.texi
@@ -8878,6 +8878,7 @@ delogo=x=0:y=0:w=100:h=77:band=10
 @end itemize
 
+@anchor{derain}
 @section derain
 
 Remove the rain in the input image/video by applying the derain methods based on
@@ -8932,6 +8933,8 @@ Note that different backends use different file formats. TensorFlow and native
 backend can load files for only its format.
 @end table
 
+It can also be finished with @ref{dnn_processing} filter.
+
 @section deshake
 
 Attempt to fix small changes in horizontal and/or vertical shift. This
@@ -9201,9 +9204,9 @@ Set the output name of the dnn network.
 
 @itemize
 @item
-Halve the red channle of the frame with format rgb24:
+Remove rain in rgb24 frame with can.pb (see @ref{derain} filter):
 @example
-ffmpeg -i input.jpg -vf format=rgb24,dnn_processing=model=halve_first_channel.model:input=dnn_in:output=dnn_out:dnn_backend=native out.native.png
+./ffmpeg -i rain.jpg -vf format=rgb24,dnn_processing=dnn_backend=tensorflow:model=can.pb:input=x:output=y derain.jpg
 @end example
 
 @item
diff --git a/libavfilter/vf_derain.c b/libavfilter/vf_derain.c
@@ -100,7 +100,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFilterLink *outlink = ctx->outputs[0];
     DRContext *dr_context = ctx->priv;
     DNNReturnType dnn_result;
-    int pad_size;
 
     AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     if (!out) {
@@ -129,15 +128,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     out->width  = dr_context->output.width;
     outlink->h  = dr_context->output.height;
     outlink->w  = dr_context->output.width;
-    pad_size = (in->height - out->height) >> 1;
 
     for (int i = 0; i < out->height; i++){
         for(int j = 0; j < out->width * 3; j++){
             int k = i * out->linesize[0] + j;
             int t = i * out->width * 3 + j;
-            int t_in = (i + pad_size) * in->width * 3 + j + pad_size * 3;
-            out->data[0][k] = CLIP((int)((((float *)dr_context->input.data)[t_in] - ((float *)dr_context->output.data)[t]) * 255), 0, 255);
+            out->data[0][k] = CLIP((int)((((float *)dr_context->output.data)[t]) * 255), 0, 255);
         }
     }
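With the updated model, the filter writes the network output directly, so a plain derain invocation (a hypothetical command line, assuming the updated can.pb from the model repository linked above) is simply:

ffmpeg -i rain.jpg -vf derain=dnn_backend=tensorflow:model=can.pb derain.jpg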