This is a CUDA implementation of yadif, which gives us a way to
do deinterlacing when using the nvdec hwaccel. In that scenario
we don't have access to the NVIDIA deinterlacer.
(cherry picked from commit d5272e94ab)
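For context, a typical invocation that keeps the whole decode/deinterlace/encode
pipeline on the GPU might look like the following (file names and encoder
settings are placeholders, not part of this commit):

    ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i in.ts \
           -vf yadif_cuda -c:v h264_nvenc out.mp4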
@@ -42,6 +42,7 @@ version 4.1:
- xstack filter
- pcm vidc decoder and encoder
- (a)graphmonitor filter
- yadif_cuda filter

version 4.0:
@@ -3481,6 +3481,7 @@ zscale_filter_deps="libzimg const_nan"
scale_vaapi_filter_deps="vaapi"
vpp_qsv_filter_deps="libmfx"
vpp_qsv_filter_select="qsvvpp"
yadif_cuda_filter_deps="cuda_sdk"

# examples
avio_dir_cmd_deps="avformat avutil"
@@ -17943,6 +17943,64 @@ filter").
It accepts the following parameters:

@table @option

@item mode
The interlacing mode to adopt. It accepts one of the following values:

@table @option
@item 0, send_frame
Output one frame for each frame.
@item 1, send_field
Output one frame for each field.
@item 2, send_frame_nospatial
Like @code{send_frame}, but it skips the spatial interlacing check.
@item 3, send_field_nospatial
Like @code{send_field}, but it skips the spatial interlacing check.
@end table

The default value is @code{send_frame}.

@item parity
The picture field parity assumed for the input interlaced video. It accepts one
of the following values:

@table @option
@item 0, tff
Assume the top field is first.
@item 1, bff
Assume the bottom field is first.
@item -1, auto
Enable automatic detection of field parity.
@end table

The default value is @code{auto}.
If the interlacing is unknown or the decoder does not export this information,
top field first will be assumed.
@item deint
Specify which frames to deinterlace. It accepts one of the following
values:

@table @option
@item 0, all
Deinterlace all frames.
@item 1, interlaced
Only deinterlace frames marked as interlaced.
@end table

The default value is @code{all}.

@end table
@section yadif_cuda

Deinterlace the input video using the @ref{yadif} algorithm, but implemented
in CUDA so that it can work as part of a GPU accelerated pipeline with nvdec
and/or nvenc.

It accepts the following parameters:

@table @option

@item mode
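As an illustration of the options documented in this section, a hypothetical
command line in the @example style of filters.texi (stream names and encoder
are placeholders, not part of this patch):

@example
ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i input.mkv \
       -vf yadif_cuda=mode=send_field:parity=auto:deint=interlaced \
       -c:v h264_nvenc output.mkv
@end example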
@@ -408,6 +408,7 @@ OBJS-$(CONFIG_WEAVE_FILTER)                  += vf_weave.o
OBJS-$(CONFIG_XBR_FILTER)                    += vf_xbr.o
OBJS-$(CONFIG_XSTACK_FILTER)                 += vf_stack.o framesync.o
OBJS-$(CONFIG_YADIF_FILTER)                  += vf_yadif.o yadif_common.o
OBJS-$(CONFIG_YADIF_CUDA_FILTER)             += vf_yadif_cuda.o vf_yadif_cuda.ptx.o yadif_common.o
OBJS-$(CONFIG_ZMQ_FILTER)                    += f_zmq.o
OBJS-$(CONFIG_ZOOMPAN_FILTER)                += vf_zoompan.o
OBJS-$(CONFIG_ZSCALE_FILTER)                 += vf_zscale.o
@@ -389,6 +389,7 @@ extern AVFilter ff_vf_weave;
extern AVFilter ff_vf_xbr;
extern AVFilter ff_vf_xstack;
extern AVFilter ff_vf_yadif;
extern AVFilter ff_vf_yadif_cuda;
extern AVFilter ff_vf_zmq;
extern AVFilter ff_vf_zoompan;
extern AVFilter ff_vf_zscale;
@@ -31,7 +31,7 @@
 #define LIBAVFILTER_VERSION_MAJOR   7
 #define LIBAVFILTER_VERSION_MINOR  40
-#define LIBAVFILTER_VERSION_MICRO 100
+#define LIBAVFILTER_VERSION_MICRO 101

 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
                                                LIBAVFILTER_VERSION_MINOR, \
@@ -0,0 +1,426 @@
/*
 * Copyright (C) 2018 Philip Langdale <philipl@overt.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <cuda.h>

#include "libavutil/avassert.h"
#include "libavutil/hwcontext_cuda.h"

#include "internal.h"
#include "yadif.h"

extern char vf_yadif_cuda_ptx[];
typedef struct DeintCUDAContext {
    YADIFContext yadif;

    AVCUDADeviceContext *hwctx;
    AVBufferRef         *device_ref;
    AVBufferRef         *input_frames_ref;
    AVHWFramesContext   *input_frames;

    CUcontext   cu_ctx;
    CUstream    stream;
    CUmodule    cu_module;
    CUfunction  cu_func_uchar;
    CUfunction  cu_func_uchar2;
    CUfunction  cu_func_ushort;
    CUfunction  cu_func_ushort2;
} DeintCUDAContext;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
#define ALIGN_UP(a, b) (((a) + (b) - 1) & ~((b) - 1))
#define BLOCKX 32
#define BLOCKY 16
static int check_cu(AVFilterContext *avctx, CUresult err, const char *func)
{
    const char *err_name;
    const char *err_string;

    av_log(avctx, AV_LOG_TRACE, "Calling %s\n", func);

    if (err == CUDA_SUCCESS)
        return 0;

    cuGetErrorName(err, &err_name);
    cuGetErrorString(err, &err_string);

    av_log(avctx, AV_LOG_ERROR, "%s failed", func);
    if (err_name && err_string)
        av_log(avctx, AV_LOG_ERROR, " -> %s: %s", err_name, err_string);
    av_log(avctx, AV_LOG_ERROR, "\n");

    return AVERROR_EXTERNAL;
}

#define CHECK_CU(x) check_cu(ctx, (x), #x)
static CUresult call_kernel(AVFilterContext *ctx, CUfunction func,
                            CUdeviceptr prev, CUdeviceptr cur, CUdeviceptr next,
                            CUarray_format format, int channels,
                            int src_width,  // Width is pixels per channel
                            int src_height, // Height is pixels per channel
                            int src_pitch,  // Pitch is bytes
                            CUdeviceptr dst,
                            int dst_width,  // Width is pixels per channel
                            int dst_height, // Height is pixels per channel
                            int dst_pitch,  // Pitch is pixels per channel
                            int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    CUtexObject tex_prev = 0, tex_cur = 0, tex_next = 0;
    CUresult err;
    int skip_spatial_check = s->yadif.mode & 2;

    void *args[] = { &dst, &tex_prev, &tex_cur, &tex_next,
                     &dst_width, &dst_height, &dst_pitch,
                     &src_width, &src_height, &parity, &tff,
                     &skip_spatial_check };

    // addressMode is left at its default; with unnormalized coordinates,
    // out-of-range texture reads clamp to the edge, which the kernel
    // relies on instead of bounds-checking its neighbourhood fetches.
    CUDA_TEXTURE_DESC tex_desc = {
        .filterMode = CU_TR_FILTER_MODE_POINT,
        .flags = CU_TRSF_READ_AS_INTEGER,
    };

    CUDA_RESOURCE_DESC res_desc = {
        .resType = CU_RESOURCE_TYPE_PITCH2D,
        .res.pitch2D.format = format,
        .res.pitch2D.numChannels = channels,
        .res.pitch2D.width = src_width,
        .res.pitch2D.height = src_height,
        .res.pitch2D.pitchInBytes = src_pitch,
    };

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)prev;
    err = CHECK_CU(cuTexObjectCreate(&tex_prev, &res_desc, &tex_desc, NULL));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)cur;
    err = CHECK_CU(cuTexObjectCreate(&tex_cur, &res_desc, &tex_desc, NULL));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)next;
    err = CHECK_CU(cuTexObjectCreate(&tex_next, &res_desc, &tex_desc, NULL));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    err = CHECK_CU(cuLaunchKernel(func,
                                  DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1,
                                  BLOCKX, BLOCKY, 1,
                                  0, s->stream, args, NULL));

exit:
    if (tex_prev)
        CHECK_CU(cuTexObjectDestroy(tex_prev));
    if (tex_cur)
        CHECK_CU(cuTexObjectDestroy(tex_cur));
    if (tex_next)
        CHECK_CU(cuTexObjectDestroy(tex_next));

    return err;
}
static void filter(AVFilterContext *ctx, AVFrame *dst,
                   int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CUcontext dummy;
    CUresult err;
    int i;

    err = CHECK_CU(cuCtxPushCurrent(s->cu_ctx));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    for (i = 0; i < y->csp->nb_components; i++) {
        CUfunction func;
        CUarray_format format;
        int pixel_size, channels;
        const AVComponentDescriptor *comp = &y->csp->comp[i];

        if (comp->plane < i) {
            // We process planes as a whole, so don't reprocess
            // them for additional components.
            continue;
        }

        pixel_size = (comp->depth + comp->shift) / 8;
        channels = comp->step / pixel_size;
        if (pixel_size > 2 || channels > 2) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        switch (pixel_size) {
        case 1:
            func = channels == 1 ? s->cu_func_uchar : s->cu_func_uchar2;
            format = CU_AD_FORMAT_UNSIGNED_INT8;
            break;
        case 2:
            func = channels == 1 ? s->cu_func_ushort : s->cu_func_ushort2;
            format = CU_AD_FORMAT_UNSIGNED_INT16;
            break;
        default:
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        av_log(ctx, AV_LOG_TRACE,
               "Deinterlacing plane %d: pixel_size: %d channels: %d\n",
               comp->plane, pixel_size, channels);
        call_kernel(ctx, func,
                    (CUdeviceptr)y->prev->data[i],
                    (CUdeviceptr)y->cur->data[i],
                    (CUdeviceptr)y->next->data[i],
                    format, channels,
                    AV_CEIL_RSHIFT(y->cur->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(y->cur->height, i ? y->csp->log2_chroma_h : 0),
                    y->cur->linesize[i],
                    (CUdeviceptr)dst->data[i],
                    AV_CEIL_RSHIFT(dst->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(dst->height, i ? y->csp->log2_chroma_h : 0),
                    dst->linesize[i] / comp->step,
                    parity, tff);
    }

    err = CHECK_CU(cuStreamSynchronize(s->stream));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

exit:
    CHECK_CU(cuCtxPopCurrent(&dummy));

    return;
}
static av_cold void deint_cuda_uninit(AVFilterContext *ctx)
{
    CUcontext dummy;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;

    if (s->cu_module) {
        CHECK_CU(cuCtxPushCurrent(s->cu_ctx));
        CHECK_CU(cuModuleUnload(s->cu_module));
        CHECK_CU(cuCtxPopCurrent(&dummy));
    }

    av_frame_free(&y->prev);
    av_frame_free(&y->cur);
    av_frame_free(&y->next);

    av_buffer_unref(&s->device_ref);
    s->hwctx = NULL;
    av_buffer_unref(&s->input_frames_ref);
    s->input_frames = NULL;
}
static int deint_cuda_query_formats(AVFilterContext *ctx)
{
    enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE,
    };
    int ret;

    if ((ret = ff_formats_ref(ff_make_format_list(pix_fmts),
                              &ctx->inputs[0]->out_formats)) < 0)
        return ret;
    if ((ret = ff_formats_ref(ff_make_format_list(pix_fmts),
                              &ctx->outputs[0]->in_formats)) < 0)
        return ret;

    return 0;
}
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DeintCUDAContext *s = ctx->priv;

    if (!inlink->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    s->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    if (!s->input_frames_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create input frames "
               "reference.\n");
        return AVERROR(ENOMEM);
    }
    s->input_frames = (AVHWFramesContext*)s->input_frames_ref->data;

    return 0;
}
static int config_output(AVFilterLink *link)
{
    AVHWFramesContext *output_frames;
    AVFilterContext *ctx = link->src;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    int ret = 0;
    CUcontext dummy;
    CUresult err;

    av_assert0(s->input_frames);

    s->device_ref = av_buffer_ref(s->input_frames->device_ref);
    if (!s->device_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create device "
               "reference.\n");
        return AVERROR(ENOMEM);
    }
    s->hwctx = ((AVHWDeviceContext*)s->device_ref->data)->hwctx;
    s->cu_ctx = s->hwctx->cuda_ctx;
    s->stream = s->hwctx->stream;

    link->hw_frames_ctx = av_hwframe_ctx_alloc(s->device_ref);
    if (!link->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        ret = AVERROR(ENOMEM);
        goto exit;
    }

    output_frames = (AVHWFramesContext*)link->hw_frames_ctx->data;

    output_frames->format    = AV_PIX_FMT_CUDA;
    output_frames->sw_format = s->input_frames->sw_format;
    output_frames->width     = ctx->inputs[0]->w;
    output_frames->height    = ctx->inputs[0]->h;

    output_frames->initial_pool_size = 4;

    ret = ff_filter_init_hw_frames(ctx, link, 10);
    if (ret < 0)
        goto exit;

    ret = av_hwframe_ctx_init(link->hw_frames_ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialise CUDA frame "
               "context for output: %d\n", ret);
        goto exit;
    }

    link->time_base.num = ctx->inputs[0]->time_base.num;
    link->time_base.den = ctx->inputs[0]->time_base.den * 2;
    link->w             = ctx->inputs[0]->w;
    link->h             = ctx->inputs[0]->h;

    if (y->mode & 1)
        link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                    (AVRational){2, 1});

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        ret = AVERROR(EINVAL);
        goto exit;
    }

    y->csp = av_pix_fmt_desc_get(output_frames->sw_format);
    y->filter = filter;

    err = CHECK_CU(cuCtxPushCurrent(s->cu_ctx));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_EXTERNAL;
        goto exit;
    }

    err = CHECK_CU(cuModuleLoadData(&s->cu_module, vf_yadif_cuda_ptx));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar, s->cu_module, "yadif_uchar"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, "yadif_uchar2"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, "yadif_ushort"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, "yadif_ushort2"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

exit:
    CHECK_CU(cuCtxPopCurrent(&dummy));

    return ret;
}
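A worked example of the timestamp arithmetic in config_output() above, assuming
29.97 fps NTSC input (the numbers are illustrative, not from this patch):

    #include <stdio.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational in_tb = { 1001, 30000 };  /* input time base */
        AVRational in_fr = { 30000, 1001 };  /* 29.97 frames per second */

        /* Doubling the time base denominator gives consecutive output
         * fields distinct integer timestamps. */
        AVRational out_tb = { in_tb.num, in_tb.den * 2 };  /* 1001/60000 */

        /* In send_field mode (y->mode & 1), the frame rate doubles too. */
        AVRational out_fr = av_mul_q(in_fr, (AVRational){ 2, 1 });  /* 60000/1001 */

        printf("tb %d/%d -> %d/%d, fr %d/%d -> %d/%d\n",
               in_tb.num, in_tb.den, out_tb.num, out_tb.den,
               in_fr.num, in_fr.den, out_fr.num, out_fr.den);
        return 0;
    }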
static const AVClass yadif_cuda_class = {
    .class_name = "yadif_cuda",
    .item_name  = av_default_item_name,
    .option     = ff_yadif_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

static const AVFilterPad deint_cuda_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = ff_yadif_filter_frame,
        .config_props  = config_input,
    },
    { NULL }
};

static const AVFilterPad deint_cuda_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = ff_yadif_request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_yadif_cuda = {
    .name           = "yadif_cuda",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlace CUDA frames"),
    .priv_size      = sizeof(DeintCUDAContext),
    .priv_class     = &yadif_cuda_class,
    .uninit         = deint_cuda_uninit,
    .query_formats  = deint_cuda_query_formats,
    .inputs         = deint_cuda_inputs,
    .outputs        = deint_cuda_outputs,
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
@@ -0,0 +1,296 @@
/*
 * Copyright (C) 2018 Philip Langdale <philipl@overt.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
template<typename T>
__inline__ __device__ T spatial_predictor(T a, T b, T c, T d, T e, T f, T g,
                                          T h, T i, T j, T k, T l, T m, T n)
{
    int spatial_pred = (d + k)/2;
    int spatial_score = abs(c - j) + abs(d - k) + abs(e - l);

    int score = abs(b - k) + abs(c - l) + abs(d - m);
    if (score < spatial_score) {
        spatial_pred = (c + l)/2;
        spatial_score = score;
        score = abs(a - l) + abs(b - m) + abs(c - n);
        if (score < spatial_score) {
            spatial_pred = (b + m)/2;
            spatial_score = score;
        }
    }
    score = abs(d - i) + abs(e - j) + abs(f - k);
    if (score < spatial_score) {
        spatial_pred = (e + j)/2;
        spatial_score = score;
        score = abs(e - h) + abs(f - i) + abs(g - j);
        if (score < spatial_score) {
            spatial_pred = (f + i)/2;
            spatial_score = score;
        }
    }
    return spatial_pred;
}
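In the argument naming above, a..g are the seven pixels on the line above the
output pixel (horizontal offsets -3..+3) and h..n the seven on the line below.
A sketch of the relation the nested ifs implement, writing up_s / down_s for
those offsets and s for the horizontal shift of the candidate edge direction:

\[
\mathrm{score}(s) = \sum_{t=-1}^{1} \bigl|\,\mathrm{up}_{s+t} - \mathrm{down}_{-s+t}\,\bigr|,
\qquad
\mathrm{pred}(s) = \frac{\mathrm{up}_{s} + \mathrm{down}_{-s}}{2},
\]

with s in {-2, -1, 0, 1, 2} searched greedily rather than exhaustively: |s| = 2
is only tried in a direction where |s| = 1 already beat s = 0.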
__inline__ __device__ int max3(int a, int b, int c)
{
    int x = max(a, b);
    return max(x, c);
}

__inline__ __device__ int min3(int a, int b, int c)
{
    int x = min(a, b);
    return min(x, c);
}
template<typename T>
__inline__ __device__ T temporal_predictor(T A, T B, T C, T D, T E, T F,
                                           T G, T H, T I, T J, T K, T L,
                                           T spatial_pred, bool skip_check)
{
    int p0 = (C + H) / 2;
    int p1 = F;
    int p2 = (D + I) / 2;
    int p3 = G;
    int p4 = (E + J) / 2;

    int tdiff0 = abs(D - I);
    int tdiff1 = (abs(A - F) + abs(B - G)) / 2;
    int tdiff2 = (abs(K - F) + abs(G - L)) / 2;

    int diff = max3(tdiff0, tdiff1, tdiff2);

    if (!skip_check) {
        int maxi = max3(p2 - p3, p2 - p1, min(p0 - p1, p4 - p3));
        int mini = min3(p2 - p3, p2 - p1, max(p0 - p1, p4 - p3));
        diff = max3(diff, mini, -maxi);
    }

    if (spatial_pred > p2 + diff) {
        spatial_pred = p2 + diff;
    }
    if (spatial_pred < p2 - diff) {
        spatial_pred = p2 - diff;
    }

    return spatial_pred;
}
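The final two branches are a clamp of the spatial guess to a temporally
plausible interval; as a formula, with \(p_2 = (D + I)/2\) and diff as computed
above:

\[
\mathrm{pred} \leftarrow \max\bigl(p_2 - \mathrm{diff},\; \min(p_2 + \mathrm{diff},\; \mathrm{pred})\bigr)
\]

When skip_check is false, diff can only grow under the max3, loosening the
clamp where the vertical neighbourhood suggests genuine detail rather than
combing.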
template<typename T>
__inline__ __device__ void yadif_single(T *dst,
                                        cudaTextureObject_t prev,
                                        cudaTextureObject_t cur,
                                        cudaTextureObject_t next,
                                        int dst_width, int dst_height, int dst_pitch,
                                        int src_width, int src_height,
                                        int parity, int tff, bool skip_spatial_check)
{
    // Identify location
    int xo = blockIdx.x * blockDim.x + threadIdx.x;
    int yo = blockIdx.y * blockDim.y + threadIdx.y;

    if (xo >= dst_width || yo >= dst_height) {
        return;
    }

    // Don't modify the primary field
    if (yo % 2 == parity) {
        dst[yo*dst_pitch+xo] = tex2D<T>(cur, xo, yo);
        return;
    }

    // Calculate spatial prediction
    T a = tex2D<T>(cur, xo - 3, yo - 1);
    T b = tex2D<T>(cur, xo - 2, yo - 1);
    T c = tex2D<T>(cur, xo - 1, yo - 1);
    T d = tex2D<T>(cur, xo - 0, yo - 1);
    T e = tex2D<T>(cur, xo + 1, yo - 1);
    T f = tex2D<T>(cur, xo + 2, yo - 1);
    T g = tex2D<T>(cur, xo + 3, yo - 1);

    T h = tex2D<T>(cur, xo - 3, yo + 1);
    T i = tex2D<T>(cur, xo - 2, yo + 1);
    T j = tex2D<T>(cur, xo - 1, yo + 1);
    T k = tex2D<T>(cur, xo - 0, yo + 1);
    T l = tex2D<T>(cur, xo + 1, yo + 1);
    T m = tex2D<T>(cur, xo + 2, yo + 1);
    T n = tex2D<T>(cur, xo + 3, yo + 1);

    T spatial_pred =
        spatial_predictor(a, b, c, d, e, f, g, h, i, j, k, l, m, n);

    // Calculate temporal prediction
    int is_second_field = !(parity ^ tff);

    cudaTextureObject_t prev2 = prev;
    cudaTextureObject_t prev1 = is_second_field ? cur : prev;
    cudaTextureObject_t next1 = is_second_field ? next : cur;
    cudaTextureObject_t next2 = next;

    T A = tex2D<T>(prev2, xo, yo - 1);
    T B = tex2D<T>(prev2, xo, yo + 1);
    T C = tex2D<T>(prev1, xo, yo - 2);
    T D = tex2D<T>(prev1, xo, yo + 0);
    T E = tex2D<T>(prev1, xo, yo + 2);
    T F = tex2D<T>(cur, xo, yo - 1);
    T G = tex2D<T>(cur, xo, yo + 1);
    T H = tex2D<T>(next1, xo, yo - 2);
    T I = tex2D<T>(next1, xo, yo + 0);
    T J = tex2D<T>(next1, xo, yo + 2);
    T K = tex2D<T>(next2, xo, yo - 1);
    T L = tex2D<T>(next2, xo, yo + 1);

    spatial_pred = temporal_predictor(A, B, C, D, E, F, G, H, I, J, K, L,
                                      spatial_pred, skip_spatial_check);

    dst[yo*dst_pitch+xo] = spatial_pred;
}
template <typename T>
__inline__ __device__ void yadif_double(T *dst,
                                        cudaTextureObject_t prev,
                                        cudaTextureObject_t cur,
                                        cudaTextureObject_t next,
                                        int dst_width, int dst_height, int dst_pitch,
                                        int src_width, int src_height,
                                        int parity, int tff, bool skip_spatial_check)
{
    int xo = blockIdx.x * blockDim.x + threadIdx.x;
    int yo = blockIdx.y * blockDim.y + threadIdx.y;

    if (xo >= dst_width || yo >= dst_height) {
        return;
    }

    if (yo % 2 == parity) {
        // Don't modify the primary field
        dst[yo*dst_pitch+xo] = tex2D<T>(cur, xo, yo);
        return;
    }

    T a = tex2D<T>(cur, xo - 3, yo - 1);
    T b = tex2D<T>(cur, xo - 2, yo - 1);
    T c = tex2D<T>(cur, xo - 1, yo - 1);
    T d = tex2D<T>(cur, xo - 0, yo - 1);
    T e = tex2D<T>(cur, xo + 1, yo - 1);
    T f = tex2D<T>(cur, xo + 2, yo - 1);
    T g = tex2D<T>(cur, xo + 3, yo - 1);

    T h = tex2D<T>(cur, xo - 3, yo + 1);
    T i = tex2D<T>(cur, xo - 2, yo + 1);
    T j = tex2D<T>(cur, xo - 1, yo + 1);
    T k = tex2D<T>(cur, xo - 0, yo + 1);
    T l = tex2D<T>(cur, xo + 1, yo + 1);
    T m = tex2D<T>(cur, xo + 2, yo + 1);
    T n = tex2D<T>(cur, xo + 3, yo + 1);

    T spatial_pred = {
        spatial_predictor(a.x, b.x, c.x, d.x, e.x, f.x, g.x, h.x, i.x, j.x, k.x, l.x, m.x, n.x),
        spatial_predictor(a.y, b.y, c.y, d.y, e.y, f.y, g.y, h.y, i.y, j.y, k.y, l.y, m.y, n.y) };

    // Calculate temporal prediction
    int is_second_field = !(parity ^ tff);

    cudaTextureObject_t prev2 = prev;
    cudaTextureObject_t prev1 = is_second_field ? cur : prev;
    cudaTextureObject_t next1 = is_second_field ? next : cur;
    cudaTextureObject_t next2 = next;

    T A = tex2D<T>(prev2, xo, yo - 1);
    T B = tex2D<T>(prev2, xo, yo + 1);
    T C = tex2D<T>(prev1, xo, yo - 2);
    T D = tex2D<T>(prev1, xo, yo + 0);
    T E = tex2D<T>(prev1, xo, yo + 2);
    T F = tex2D<T>(cur, xo, yo - 1);
    T G = tex2D<T>(cur, xo, yo + 1);
    T H = tex2D<T>(next1, xo, yo - 2);
    T I = tex2D<T>(next1, xo, yo + 0);
    T J = tex2D<T>(next1, xo, yo + 2);
    T K = tex2D<T>(next2, xo, yo - 1);
    T L = tex2D<T>(next2, xo, yo + 1);

    spatial_pred = {
        temporal_predictor(A.x, B.x, C.x, D.x, E.x, F.x, G.x, H.x, I.x, J.x, K.x, L.x,
                           spatial_pred.x, skip_spatial_check),
        temporal_predictor(A.y, B.y, C.y, D.y, E.y, F.y, G.y, H.y, I.y, J.y, K.y, L.y,
                           spatial_pred.y, skip_spatial_check) };

    dst[yo*dst_pitch+xo] = spatial_pred;
}
| extern "C" { | |||
| __global__ void yadif_uchar(unsigned char *dst, | |||
| cudaTextureObject_t prev, | |||
| cudaTextureObject_t cur, | |||
| cudaTextureObject_t next, | |||
| int dst_width, int dst_height, int dst_pitch, | |||
| int src_width, int src_height, | |||
| int parity, int tff, bool skip_spatial_check) | |||
| { | |||
| yadif_single(dst, prev, cur, next, | |||
| dst_width, dst_height, dst_pitch, | |||
| src_width, src_height, | |||
| parity, tff, skip_spatial_check); | |||
| } | |||
| __global__ void yadif_ushort(unsigned short *dst, | |||
| cudaTextureObject_t prev, | |||
| cudaTextureObject_t cur, | |||
| cudaTextureObject_t next, | |||
| int dst_width, int dst_height, int dst_pitch, | |||
| int src_width, int src_height, | |||
| int parity, int tff, bool skip_spatial_check) | |||
| { | |||
| yadif_single(dst, prev, cur, next, | |||
| dst_width, dst_height, dst_pitch, | |||
| src_width, src_height, | |||
| parity, tff, skip_spatial_check); | |||
| } | |||
| __global__ void yadif_uchar2(uchar2 *dst, | |||
| cudaTextureObject_t prev, | |||
| cudaTextureObject_t cur, | |||
| cudaTextureObject_t next, | |||
| int dst_width, int dst_height, int dst_pitch, | |||
| int src_width, int src_height, | |||
| int parity, int tff, bool skip_spatial_check) | |||
| { | |||
| yadif_double(dst, prev, cur, next, | |||
| dst_width, dst_height, dst_pitch, | |||
| src_width, src_height, | |||
| parity, tff, skip_spatial_check); | |||
| } | |||
| __global__ void yadif_ushort2(ushort2 *dst, | |||
| cudaTextureObject_t prev, | |||
| cudaTextureObject_t cur, | |||
| cudaTextureObject_t next, | |||
| int dst_width, int dst_height, int dst_pitch, | |||
| int src_width, int src_height, | |||
| int parity, int tff, bool skip_spatial_check) | |||
| { | |||
| yadif_double(dst, prev, cur, next, | |||
| dst_width, dst_height, dst_pitch, | |||
| src_width, src_height, | |||
| parity, tff, skip_spatial_check); | |||
| } | |||
| } /* extern "C" */ | |||