Some hwaccels use name prefixes, some do not, others only use them for some codecs. Add prefixes everywhere for consistency.
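The convention being applied is <hwaccel>_<codec>_<callback> for the static callbacks behind each AVHWAccel table. As a rough sketch of the resulting shape for the DXVA2 H.264 case (callback bodies reduced to stubs, include paths assumed for a build inside the libavcodec tree, and the .name/.priv_data_size fields of the real table omitted):

    /* Sketch only: the real callbacks fill DXVA2 picture parameters, queue
     * slice data and commit the buffers; these stubs just show the prefixed
     * names being wired into the hwaccel table. */
    #include "libavutil/attributes.h"   /* av_unused */
    #include "avcodec.h"                /* AVCodecContext, AVHWAccel */

    static int dxva2_h264_start_frame(AVCodecContext *avctx,
                                      av_unused const uint8_t *buffer,
                                      av_unused uint32_t size)
    {
        return 0;                       /* per-frame setup goes here */
    }

    static int dxva2_h264_decode_slice(AVCodecContext *avctx,
                                       const uint8_t *buffer, uint32_t size)
    {
        return 0;                       /* per-slice work goes here */
    }

    static int dxva2_h264_end_frame(AVCodecContext *avctx)
    {
        return 0;                       /* buffer submission goes here */
    }

    AVHWAccel ff_h264_dxva2_hwaccel = {
        .type         = AVMEDIA_TYPE_VIDEO,
        .id           = AV_CODEC_ID_H264,
        .pix_fmt      = AV_PIX_FMT_DXVA2_VLD,
        .start_frame  = dxva2_h264_start_frame,
        .decode_slice = dxva2_h264_decode_slice,
        .end_frame    = dxva2_h264_end_frame,
    };

The hunks below apply the same renaming to the MPEG-2 and VC-1 DXVA2 hwaccels and to the VA-API and VDA H.264 hwaccels.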
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -108,7 +108,7 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context
         (h->pps.transform_8x8_mode << 13) |
         ((h->sps.level_idc >= 31) << 14) |
         /* IntraPicFlag (Modified if we detect a non
-         * intra slice in decode_slice) */
+         * intra slice in dxva2_h264_decode_slice) */
         (1 << 15);

     pp->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8;
@@ -367,9 +367,9 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 }

-static int start_frame(AVCodecContext *avctx,
-                       av_unused const uint8_t *buffer,
-                       av_unused uint32_t size)
+static int dxva2_h264_start_frame(AVCodecContext *avctx,
+                                  av_unused const uint8_t *buffer,
+                                  av_unused uint32_t size)
 {
     const H264Context *h = avctx->priv_data;
     struct dxva_context *ctx = avctx->hwaccel_context;
@@ -391,8 +391,9 @@ static int start_frame(AVCodecContext *avctx,
     return 0;
 }

-static int decode_slice(AVCodecContext *avctx,
-                        const uint8_t *buffer, uint32_t size)
+static int dxva2_h264_decode_slice(AVCodecContext *avctx,
+                                   const uint8_t *buffer,
+                                   uint32_t size)
 {
     const H264Context *h = avctx->priv_data;
     struct dxva_context *ctx = avctx->hwaccel_context;
@@ -421,7 +422,7 @@ static int decode_slice(AVCodecContext *avctx,
     return 0;
 }

-static int end_frame(AVCodecContext *avctx)
+static int dxva2_h264_end_frame(AVCodecContext *avctx)
 {
     H264Context *h = avctx->priv_data;
     struct dxva2_picture_context *ctx_pic =
@@ -444,8 +445,8 @@ AVHWAccel ff_h264_dxva2_hwaccel = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H264,
     .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
-    .start_frame    = start_frame,
-    .decode_slice   = decode_slice,
-    .end_frame      = end_frame,
+    .start_frame    = dxva2_h264_start_frame,
+    .decode_slice   = dxva2_h264_decode_slice,
+    .end_frame      = dxva2_h264_end_frame,
     .priv_data_size = sizeof(struct dxva2_picture_context),
 };
--- a/libavcodec/dxva2_mpeg2.c
+++ b/libavcodec/dxva2_mpeg2.c
@@ -203,9 +203,9 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                   mb_count);
 }

-static int start_frame(AVCodecContext *avctx,
-                       av_unused const uint8_t *buffer,
-                       av_unused uint32_t size)
+static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
+                                   av_unused const uint8_t *buffer,
+                                   av_unused uint32_t size)
 {
     const struct MpegEncContext *s = avctx->priv_data;
     struct dxva_context *ctx = avctx->hwaccel_context;
@@ -225,8 +225,8 @@ static int start_frame(AVCodecContext *avctx,
     return 0;
 }

-static int decode_slice(AVCodecContext *avctx,
-                        const uint8_t *buffer, uint32_t size)
+static int dxva2_mpeg2_decode_slice(AVCodecContext *avctx,
+                                    const uint8_t *buffer, uint32_t size)
 {
     const struct MpegEncContext *s = avctx->priv_data;
     struct dxva2_picture_context *ctx_pic =
@@ -246,7 +246,7 @@ static int decode_slice(AVCodecContext *avctx,
     return 0;
 }

-static int end_frame(AVCodecContext *avctx)
+static int dxva2_mpeg2_end_frame(AVCodecContext *avctx)
 {
     struct MpegEncContext *s = avctx->priv_data;
     struct dxva2_picture_context *ctx_pic =
@@ -269,8 +269,8 @@ AVHWAccel ff_mpeg2_dxva2_hwaccel = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MPEG2VIDEO,
     .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
-    .start_frame    = start_frame,
-    .decode_slice   = decode_slice,
-    .end_frame      = end_frame,
+    .start_frame    = dxva2_mpeg2_start_frame,
+    .decode_slice   = dxva2_mpeg2_decode_slice,
+    .end_frame      = dxva2_mpeg2_end_frame,
     .priv_data_size = sizeof(struct dxva2_picture_context),
 };
--- a/libavcodec/dxva2_vc1.c
+++ b/libavcodec/dxva2_vc1.c
@@ -208,9 +208,9 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                   slice, sizeof(*slice), bs->NumMBsInBuffer);
 }

-static int start_frame(AVCodecContext *avctx,
-                       av_unused const uint8_t *buffer,
-                       av_unused uint32_t size)
+static int dxva2_vc1_start_frame(AVCodecContext *avctx,
+                                 av_unused const uint8_t *buffer,
+                                 av_unused uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
     struct dxva_context *ctx = avctx->hwaccel_context;
@@ -227,8 +227,9 @@ static int start_frame(AVCodecContext *avctx,
     return 0;
 }

-static int decode_slice(AVCodecContext *avctx,
-                        const uint8_t *buffer, uint32_t size)
+static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
+                                  const uint8_t *buffer,
+                                  uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
     const Picture *current_picture = v->s.current_picture_ptr;
@@ -250,7 +251,7 @@ static int decode_slice(AVCodecContext *avctx,
     return 0;
 }

-static int end_frame(AVCodecContext *avctx)
+static int dxva2_vc1_end_frame(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
     struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->f.hwaccel_picture_private;
@@ -274,9 +275,9 @@ AVHWAccel ff_wmv3_dxva2_hwaccel = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_WMV3,
     .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
-    .start_frame    = start_frame,
-    .decode_slice   = decode_slice,
-    .end_frame      = end_frame,
+    .start_frame    = dxva2_vc1_start_frame,
+    .decode_slice   = dxva2_vc1_decode_slice,
+    .end_frame      = dxva2_vc1_end_frame,
     .priv_data_size = sizeof(struct dxva2_picture_context),
 };
 #endif
@@ -286,8 +287,8 @@ AVHWAccel ff_vc1_dxva2_hwaccel = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_VC1,
     .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
-    .start_frame    = start_frame,
-    .decode_slice   = decode_slice,
-    .end_frame      = end_frame,
+    .start_frame    = dxva2_vc1_start_frame,
+    .decode_slice   = dxva2_vc1_decode_slice,
+    .end_frame      = dxva2_vc1_end_frame,
     .priv_data_size = sizeof(struct dxva2_picture_context),
 };
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -219,16 +219,16 @@ static void fill_vaapi_plain_pred_weight_table(H264Context *h,
 }

 /** Initialize and start decoding a frame with VA API. */
-static int start_frame(AVCodecContext *avctx,
-                       av_unused const uint8_t *buffer,
-                       av_unused uint32_t size)
+static int vaapi_h264_start_frame(AVCodecContext *avctx,
+                                  av_unused const uint8_t *buffer,
+                                  av_unused uint32_t size)
 {
     H264Context * const h = avctx->priv_data;
     struct vaapi_context * const vactx = avctx->hwaccel_context;
     VAPictureParameterBufferH264 *pic_param;
     VAIQMatrixBufferH264 *iq_matrix;

-    av_dlog(avctx, "start_frame()\n");
+    av_dlog(avctx, "vaapi_h264_start_frame()\n");

     vactx->slice_param_size = sizeof(VASliceParameterBufferH264);
@@ -286,13 +286,13 @@ static int start_frame(AVCodecContext *avctx,
 }

 /** End a hardware decoding based frame. */
-static int end_frame(AVCodecContext *avctx)
+static int vaapi_h264_end_frame(AVCodecContext *avctx)
 {
     struct vaapi_context * const vactx = avctx->hwaccel_context;
     H264Context * const h = avctx->priv_data;
     int ret;

-    av_dlog(avctx, "end_frame()\n");
+    av_dlog(avctx, "vaapi_h264_end_frame()\n");

     ret = ff_vaapi_commit_slices(vactx);
     if (ret < 0)
         goto finish;
@@ -309,14 +309,15 @@ finish:
 }

 /** Decode the given H.264 slice with VA API. */
-static int decode_slice(AVCodecContext *avctx,
-                        const uint8_t *buffer,
-                        uint32_t size)
+static int vaapi_h264_decode_slice(AVCodecContext *avctx,
+                                   const uint8_t *buffer,
+                                   uint32_t size)
 {
     H264Context * const h = avctx->priv_data;
     VASliceParameterBufferH264 *slice_param;

-    av_dlog(avctx, "decode_slice(): buffer %p, size %d\n", buffer, size);
+    av_dlog(avctx, "vaapi_h264_decode_slice(): buffer %p, size %d\n",
+            buffer, size);

     /* Fill in VASliceParameterBufferH264. */
     slice_param = (VASliceParameterBufferH264 *)ff_vaapi_alloc_slice(avctx->hwaccel_context, buffer, size);
@@ -353,7 +354,7 @@ AVHWAccel ff_h264_vaapi_hwaccel = {
     .type          = AVMEDIA_TYPE_VIDEO,
     .id            = AV_CODEC_ID_H264,
     .pix_fmt       = AV_PIX_FMT_VAAPI_VLD,
-    .start_frame   = start_frame,
-    .end_frame     = end_frame,
-    .decode_slice  = decode_slice,
+    .start_frame   = vaapi_h264_start_frame,
+    .end_frame     = vaapi_h264_end_frame,
+    .decode_slice  = vaapi_h264_decode_slice,
 };
--- a/libavcodec/vda_h264.c
+++ b/libavcodec/vda_h264.c
@@ -197,9 +197,9 @@ static int vda_sync_decode(struct vda_context *vda_ctx)
 }

-static int start_frame(AVCodecContext *avctx,
-                       av_unused const uint8_t *buffer,
-                       av_unused uint32_t size)
+static int vda_h264_start_frame(AVCodecContext *avctx,
+                                av_unused const uint8_t *buffer,
+                                av_unused uint32_t size)
 {
     struct vda_context *vda_ctx = avctx->hwaccel_context;
@@ -211,9 +211,9 @@ static int start_frame(AVCodecContext *avctx,
     return 0;
 }

-static int decode_slice(AVCodecContext *avctx,
-                        const uint8_t *buffer,
-                        uint32_t size)
+static int vda_h264_decode_slice(AVCodecContext *avctx,
+                                 const uint8_t *buffer,
+                                 uint32_t size)
 {
     struct vda_context *vda_ctx = avctx->hwaccel_context;
     void *tmp;
@@ -237,7 +237,7 @@ static int decode_slice(AVCodecContext *avctx,
     return 0;
 }

-static int end_frame(AVCodecContext *avctx)
+static int vda_h264_end_frame(AVCodecContext *avctx)
 {
     H264Context *h = avctx->priv_data;
     struct vda_context *vda_ctx = avctx->hwaccel_context;
@@ -372,7 +372,7 @@ AVHWAccel ff_h264_vda_hwaccel = {
     .type         = AVMEDIA_TYPE_VIDEO,
     .id           = AV_CODEC_ID_H264,
     .pix_fmt      = AV_PIX_FMT_VDA_VLD,
-    .start_frame  = start_frame,
-    .decode_slice = decode_slice,
-    .end_frame    = end_frame,
+    .start_frame  = vda_h264_start_frame,
+    .decode_slice = vda_h264_decode_slice,
+    .end_frame    = vda_h264_end_frame,
 };