* commit '3176217c60ca7828712985092d9102d331ea4f3d':
  h264: decouple h264_ps from the h264 decoder

Main changes:
- a local GetBitContext is created for the various
  ff_h264_decode_seq_parameter_set() attempts
- just like the old code, remove_sps() is adjusted so it doesn't remove the
  PPS. Fixes decode with Ticket #631
  (http://ffmpeg.org/pipermail/ffmpeg-user/attachments/20111108/dae58f17/attachment.mp4),
  but see the next point as well.
- ff_h264_update_thread_context() is updated to work even when the SPS isn't
  set, as that breaks the current skip_frame code. This makes sure we can
  still decode the sample from Ticket #631 without the need for
  -flags2 +chunks. (Thanks to Michael)
- keep {sps,pps}_ref pointers that stay alive even when the active PPS/SPS
  get removed from the available lists (patch by michaelni, with additional
  frees in ff_h264_free_context() from mateo)
- added a check on sps in avpriv_h264_has_num_reorder_frames() to fix crashes
  with mpegts_with_dvbsubs.ts from Ticket #4074
  (http://samples.ffmpeg.org/ffmpeg-bugs/trac/ticket4074/mpegts_with_dvbsubs.ts)
- in h264_parser.c:h264_parse(), after ff_h264_decode_extradata() is called,
  the PPS and SPS from the local parser context are updated with the PPS and
  SPS from the H264Context that was used. This fixes fate-flv-demux.
- in h264_slice.c, the "PPS changed between slices" error is no longer
  triggered in one condition, as it makes fate-h264-xavc-4389 fail with
  THREADS=N (Thanks to Michael)

Merged-by: Clément Bœsch <clement@stupeflix.com>
Merged-by: Michael Niedermayer <michael@niedermayer.cc>
Merged-by: Matthieu Bouron <matthieu.bouron@stupeflix.com>
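For reference, the heart of the change is that SPS/PPS storage moves from the plain sps_buffers[]/pps_buffers[] arrays (plus embedded copies of the active SPS/PPS) inside H264Context into a refcounted H264ParamSets, so an active parameter set can outlive its slot in sps_list[]/pps_list[]. Below is a minimal standalone sketch of that ownership pattern, not the code from this patch: it only assumes libavutil's AVBufferRef API (av_buffer_alloc/av_buffer_ref/av_buffer_unref), and ToySPS, ToyParamSets, store_sps() and activate_sps() are hypothetical stand-ins for the real parameter-set handling. It can be built with, e.g., gcc sketch.c $(pkg-config --cflags --libs libavutil).

    /*
     * Standalone sketch (NOT the actual FFmpeg code) of the ownership model the
     * patch introduces: parameter sets live in refcounted AVBufferRef slots, and
     * the user holds its own sps_ref so the active SPS survives removal from
     * the list. ToySPS, ToyParamSets, store_sps() and activate_sps() are
     * hypothetical stand-ins for the real SPS/H264ParamSets handling.
     */
    #include <stdio.h>
    #include <string.h>
    #include <libavutil/buffer.h>

    #define MAX_SPS_COUNT 32

    typedef struct ToySPS {
        int sps_id;
        int log2_max_frame_num;
    } ToySPS;

    typedef struct ToyParamSets {
        AVBufferRef *sps_list[MAX_SPS_COUNT]; /* storage, indexed by sps_id       */
        AVBufferRef *sps_ref;                 /* extra reference held by the user */
        const ToySPS *sps;                    /* currently active SPS             */
    } ToyParamSets;

    /* Store a parsed SPS: replace whatever currently sits in its slot. */
    static int store_sps(ToyParamSets *ps, const ToySPS *sps)
    {
        AVBufferRef *buf;

        if (sps->sps_id < 0 || sps->sps_id >= MAX_SPS_COUNT)
            return -1;
        buf = av_buffer_alloc(sizeof(*sps));
        if (!buf)
            return -1;
        memcpy(buf->data, sps, sizeof(*sps));
        av_buffer_unref(&ps->sps_list[sps->sps_id]);
        ps->sps_list[sps->sps_id] = buf;
        return 0;
    }

    /* Activate an SPS: take an extra reference so it outlives its list slot. */
    static int activate_sps(ToyParamSets *ps, int sps_id)
    {
        if (!ps->sps_list[sps_id])
            return -1;
        av_buffer_unref(&ps->sps_ref);
        ps->sps_ref = av_buffer_ref(ps->sps_list[sps_id]);
        if (!ps->sps_ref)
            return -1;
        ps->sps = (const ToySPS *)ps->sps_ref->data;
        return 0;
    }

    /* Analogous to the extra frees added to ff_h264_free_context(). */
    static void free_param_sets(ToyParamSets *ps)
    {
        for (int i = 0; i < MAX_SPS_COUNT; i++)
            av_buffer_unref(&ps->sps_list[i]);
        av_buffer_unref(&ps->sps_ref);
        ps->sps = NULL;
    }

    int main(void)
    {
        ToyParamSets ps = { { NULL } };
        ToySPS sps = { 0, 8 };

        if (store_sps(&ps, &sps) < 0 || activate_sps(&ps, 0) < 0)
            return 1;

        /* Drop the list slot (what remove_sps() effectively does): the active
         * pointer stays valid because sps_ref still owns a reference. */
        av_buffer_unref(&ps.sps_list[0]);
        printf("log2_max_frame_num = %d\n", ps.sps->log2_max_frame_num);

        free_param_sets(&ps);
        return 0;
    }

The merged diff follows.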
@@ -52,6 +52,8 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
                                     DXVA_PicParams_H264 *pp)
 {
     const H264Picture *current_picture = h->cur_pic_ptr;
+    const SPS *sps = h->ps.sps;
+    const PPS *pps = h->ps.pps;
     int i, j;
 
     memset(pp, 0, sizeof(*pp));
@@ -96,30 +98,30 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
     pp->wFrameWidthInMbsMinus1 = h->mb_width - 1;
     pp->wFrameHeightInMbsMinus1 = h->mb_height - 1;
-    pp->num_ref_frames = h->sps.ref_frame_count;
+    pp->num_ref_frames = sps->ref_frame_count;
 
     pp->wBitFields = ((h->picture_structure != PICT_FRAME) << 0) |
-                     ((h->sps.mb_aff &&
+                     ((sps->mb_aff &&
                      (h->picture_structure == PICT_FRAME)) << 1) |
-                     (h->sps.residual_color_transform_flag << 2) |
+                     (sps->residual_color_transform_flag << 2) |
                      /* sp_for_switch_flag (not implemented by FFmpeg) */
                      (0 << 3) |
-                     (h->sps.chroma_format_idc << 4) |
+                     (sps->chroma_format_idc << 4) |
                      ((h->nal_ref_idc != 0) << 6) |
-                     (h->pps.constrained_intra_pred << 7) |
-                     (h->pps.weighted_pred << 8) |
-                     (h->pps.weighted_bipred_idc << 9) |
+                     (pps->constrained_intra_pred << 7) |
+                     (pps->weighted_pred << 8) |
+                     (pps->weighted_bipred_idc << 9) |
                      /* MbsConsecutiveFlag */
                      (1 << 11) |
-                     (h->sps.frame_mbs_only_flag << 12) |
-                     (h->pps.transform_8x8_mode << 13) |
-                     ((h->sps.level_idc >= 31) << 14) |
+                     (sps->frame_mbs_only_flag << 12) |
+                     (pps->transform_8x8_mode << 13) |
+                     ((sps->level_idc >= 31) << 14) |
                      /* IntraPicFlag (Modified if we detect a non
                       * intra slice in dxva2_h264_decode_slice) */
                      (1 << 15);
 
-    pp->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8;
-    pp->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8;
+    pp->bit_depth_luma_minus8 = sps->bit_depth_luma - 8;
+    pp->bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8;
     if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG)
         pp->Reserved16Bits = 0;
     else if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO)
@@ -135,28 +137,28 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
     if ((h->picture_structure & PICT_BOTTOM_FIELD) &&
         current_picture->field_poc[1] != INT_MAX)
         pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1];
-    pp->pic_init_qs_minus26 = h->pps.init_qs - 26;
-    pp->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
-    pp->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
+    pp->pic_init_qs_minus26 = pps->init_qs - 26;
+    pp->chroma_qp_index_offset = pps->chroma_qp_index_offset[0];
+    pp->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1];
     pp->ContinuationFlag = 1;
-    pp->pic_init_qp_minus26 = h->pps.init_qp - 26;
-    pp->num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1;
-    pp->num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1;
+    pp->pic_init_qp_minus26 = pps->init_qp - 26;
+    pp->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1;
+    pp->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1;
     pp->Reserved8BitsA = 0;
     pp->frame_num = h->frame_num;
-    pp->log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
-    pp->pic_order_cnt_type = h->sps.poc_type;
-    if (h->sps.poc_type == 0)
-        pp->log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
-    else if (h->sps.poc_type == 1)
-        pp->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
-    pp->direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
-    pp->entropy_coding_mode_flag = h->pps.cabac;
-    pp->pic_order_present_flag = h->pps.pic_order_present;
-    pp->num_slice_groups_minus1 = h->pps.slice_group_count - 1;
-    pp->slice_group_map_type = h->pps.mb_slice_group_map_type;
-    pp->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    pp->redundant_pic_cnt_present_flag= h->pps.redundant_pic_cnt_present;
+    pp->log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4;
+    pp->pic_order_cnt_type = sps->poc_type;
+    if (sps->poc_type == 0)
+        pp->log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
+    else if (sps->poc_type == 1)
+        pp->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
+    pp->direct_8x8_inference_flag = sps->direct_8x8_inference_flag;
+    pp->entropy_coding_mode_flag = pps->cabac;
+    pp->pic_order_present_flag = pps->pic_order_present;
+    pp->num_slice_groups_minus1 = pps->slice_group_count - 1;
+    pp->slice_group_map_type = pps->mb_slice_group_map_type;
+    pp->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+    pp->redundant_pic_cnt_present_flag= pps->redundant_pic_cnt_present;
     pp->Reserved8BitsB = 0;
     pp->slice_group_change_rate_minus1= 0; /* XXX not implemented by FFmpeg */
     //pp->SliceGroupMap[810]; /* XXX not implemented by FFmpeg */
@@ -164,25 +166,26 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
 static void fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, const H264Context *h, DXVA_Qmatrix_H264 *qm)
 {
+    const PPS *pps = h->ps.pps;
     unsigned i, j;
 
     memset(qm, 0, sizeof(*qm));
     if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) {
         for (i = 0; i < 6; i++)
             for (j = 0; j < 16; j++)
-                qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][j];
+                qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][j];
 
         for (i = 0; i < 64; i++) {
-            qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][i];
-            qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][i];
+            qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][i];
+            qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][i];
         }
     } else {
         for (i = 0; i < 6; i++)
             for (j = 0; j < 16; j++)
-                qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][ff_zigzag_scan[j]];
+                qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][ff_zigzag_scan[j]];
 
         for (i = 0; i < 64; i++) {
-            qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][ff_zigzag_direct[i]];
-            qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][ff_zigzag_direct[i]];
+            qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][ff_zigzag_direct[i]];
+            qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][ff_zigzag_direct[i]];
         }
     }
 }
@@ -282,11 +285,11 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
             }
         }
     }
     slice->slice_qs_delta = 0; /* XXX not implemented by FFmpeg */
-    slice->slice_qp_delta = sl->qscale - h->pps.init_qp;
+    slice->slice_qp_delta = sl->qscale - h->ps.pps->init_qp;
     slice->redundant_pic_cnt = sl->redundant_pic_count;
     if (sl->slice_type == AV_PICTURE_TYPE_B)
         slice->direct_spatial_mv_pred_flag = sl->direct_spatial_mv_pred;
-    slice->cabac_init_idc = h->pps.cabac ? sl->cabac_init_idc : 0;
+    slice->cabac_init_idc = h->ps.pps->cabac ? sl->cabac_init_idc : 0;
     if (sl->deblocking_filter < 2)
         slice->disable_deblocking_filter_idc = 1 - sl->deblocking_filter;
     else
@@ -60,7 +60,7 @@ const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
 {
     H264Context *h = avctx->priv_data;
-    return h ? h->sps.num_reorder_frames : 0;
+    return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
 }
 
 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
@@ -224,9 +224,6 @@ int ff_h264_alloc_tables(H264Context *h)
         h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
     }
 
-    if (!h->dequant4_coeff[0])
-        ff_h264_init_dequant_tables(h);
-
     return 0;
 
 fail:
@@ -425,7 +422,6 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
     h->backup_width = -1;
     h->backup_height = -1;
     h->backup_pix_fmt = AV_PIX_FMT_NONE;
-    h->dequant_coeff_pps = -1;
     h->current_sps_id = -1;
     h->cur_chroma_format_idc = -1;
 
@@ -514,9 +510,9 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
         }
     }
 
-    if (h->sps.bitstream_restriction_flag &&
-        h->avctx->has_b_frames < h->sps.num_reorder_frames) {
-        h->avctx->has_b_frames = h->sps.num_reorder_frames;
+    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
+        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
+        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
         h->low_delay = 0;
     }
 
@@ -567,6 +563,7 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
  */
 static void decode_postinit(H264Context *h, int setup_finished)
 {
+    const SPS *sps = h->ps.sps;
     H264Picture *out = h->cur_pic_ptr;
     H264Picture *cur = h->cur_pic_ptr;
     int i, pics, out_of_order, out_idx;
@@ -596,7 +593,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
 
         /* Prioritize picture timing SEI information over used
          * decoding process if it exists. */
-        if (h->sps.pic_struct_present_flag) {
+        if (sps->pic_struct_present_flag) {
             switch (h->sei_pic_struct) {
             case SEI_PIC_STRUCT_FRAME:
                 break;
@@ -640,7 +637,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
             /* Derive top_field_first from field pocs. */
             cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
     } else {
-        if (h->sps.pic_struct_present_flag) {
+        if (sps->pic_struct_present_flag) {
             /* Use picture timing SEI information. Even if it is a
              * information of a past frame, better than nothing. */
             if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
@@ -737,9 +734,9 @@ static void decode_postinit(H264Context *h, int setup_finished)
     // FIXME do something with unavailable reference frames
 
     /* Sort B-frames into display order */
-    if (h->sps.bitstream_restriction_flag ||
+    if (sps->bitstream_restriction_flag ||
         h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
-        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, h->sps.num_reorder_frames);
+        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
     }
     h->low_delay = !h->avctx->has_b_frames;
 
@@ -762,7 +759,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
             h->last_pocs[i] = INT_MIN;
         h->last_pocs[0] = cur->poc;
         cur->mmco_reset = 1;
-    } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
+    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
         av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
         h->avctx->has_b_frames = out_of_order;
         h->low_delay = 0;
@@ -894,15 +891,16 @@ static void flush_dpb(AVCodecContext *avctx)
 
 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
 {
-    const int max_frame_num = 1 << h->sps.log2_max_frame_num;
+    const SPS *sps = h->ps.sps;
+    const int max_frame_num = 1 << sps->log2_max_frame_num;
     int field_poc[2];
 
     h->frame_num_offset = h->prev_frame_num_offset;
     if (h->frame_num < h->prev_frame_num)
         h->frame_num_offset += max_frame_num;
 
-    if (h->sps.poc_type == 0) {
-        const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
+    if (sps->poc_type == 0) {
+        const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
 
         if (h->poc_lsb < h->prev_poc_lsb &&
             h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
@@ -916,11 +914,11 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
         field_poc[1] = h->poc_msb + h->poc_lsb;
         if (h->picture_structure == PICT_FRAME)
             field_poc[1] += h->delta_poc_bottom;
-    } else if (h->sps.poc_type == 1) {
+    } else if (sps->poc_type == 1) {
         int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
         int i;
 
-        if (h->sps.poc_cycle_length != 0)
+        if (sps->poc_cycle_length != 0)
             abs_frame_num = h->frame_num_offset + h->frame_num;
         else
             abs_frame_num = 0;
@@ -929,25 +927,25 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
             abs_frame_num--;
 
         expected_delta_per_poc_cycle = 0;
-        for (i = 0; i < h->sps.poc_cycle_length; i++)
+        for (i = 0; i < sps->poc_cycle_length; i++)
             // FIXME integrate during sps parse
-            expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
+            expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];
 
         if (abs_frame_num > 0) {
-            int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
-            int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
+            int poc_cycle_cnt = (abs_frame_num - 1) / sps->poc_cycle_length;
+            int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;
 
             expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
             for (i = 0; i <= frame_num_in_poc_cycle; i++)
-                expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
+                expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
         } else
             expectedpoc = 0;
 
         if (h->nal_ref_idc == 0)
-            expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
+            expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;
 
         field_poc[0] = expectedpoc + h->delta_poc[0];
-        field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
+        field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;
 
         if (h->picture_structure == PICT_FRAME)
             field_poc[1] += h->delta_poc[1];
@@ -977,7 +975,7 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
  *
  * @return profile as defined by FF_PROFILE_H264_*
  */
-int ff_h264_get_profile(SPS *sps)
+int ff_h264_get_profile(const SPS *sps)
 {
     int profile = sps->profile_idc;
 
@@ -1154,8 +1152,8 @@ again:
                     h->valid_recovery_point = 1;
 
                 if ( h->recovery_frame < 0
-                    || av_mod_uintp2(h->recovery_frame - h->frame_num, h->sps.log2_max_frame_num) > h->sei_recovery_frame_cnt) {
-                    h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->sps.log2_max_frame_num);
+                    || av_mod_uintp2(h->recovery_frame - h->frame_num, h->ps.sps->log2_max_frame_num) > h->sei_recovery_frame_cnt) {
+                    h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
 
                     if (!h->valid_recovery_point)
                         h->recovery_frame = h->frame_num;
@@ -1225,22 +1223,21 @@ again:
             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                 goto end;
             break;
-        case NAL_SPS:
-            h->gb = nal->gb;
-            if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
+        case NAL_SPS: {
+            GetBitContext tmp_gb = nal->gb;
+            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                 break;
             av_log(h->avctx, AV_LOG_DEBUG,
                    "SPS decoding failure, trying again with the complete NAL\n");
-            init_get_bits8(&h->gb, nal->raw_data + 1, nal->raw_size - 1);
-            if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
+            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
+            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                 break;
-            h->gb = nal->gb;
-            ff_h264_decode_seq_parameter_set(h, 1);
+            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
             break;
+        }
         case NAL_PPS:
-            h->gb = nal->gb;
-            ret = ff_h264_decode_picture_parameter_set(h, nal->size_bits);
+            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
+                                                       nal->size_bits);
             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                 goto end;
             break;
@@ -1305,7 +1302,10 @@ end:
      * past end by one (callers fault) and resync_mb_y != 0
      * causes problems for the first MB line, too.
      */
-    if (!FIELD_PICTURE(h) && h->current_slice && !h->sps.new && h->enable_er) {
+    if (!FIELD_PICTURE(h) && h->current_slice &&
+        h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
+        h->enable_er) {
+
         H264SliceContext *sl = h->slice_ctx;
         int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
 
@@ -1585,10 +1585,13 @@ av_cold void ff_h264_free_context(H264Context *h)
     av_freep(&h->a53_caption);
 
     for (i = 0; i < MAX_SPS_COUNT; i++)
-        av_freep(h->sps_buffers + i);
+        av_buffer_unref(&h->ps.sps_list[i]);
 
     for (i = 0; i < MAX_PPS_COUNT; i++)
-        av_freep(h->pps_buffers + i);
+        av_buffer_unref(&h->ps.pps_list[i]);
+
+    av_buffer_unref(&h->ps.sps_ref);
+    av_buffer_unref(&h->ps.pps_ref);
 
     ff_h2645_packet_uninit(&h->pkt);
 }
@@ -28,6 +28,7 @@
 #ifndef AVCODEC_H264_H
 #define AVCODEC_H264_H
 
+#include "libavutil/buffer.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/thread.h"
 #include "cabac.h"
@@ -94,12 +95,12 @@
 #define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
 
 #ifndef CABAC
-#define CABAC(h) (h)->pps.cabac
+#define CABAC(h) (h)->ps.pps->cabac
 #endif
 
-#define CHROMA(h) ((h)->sps.chroma_format_idc)
-#define CHROMA422(h) ((h)->sps.chroma_format_idc == 2)
-#define CHROMA444(h) ((h)->sps.chroma_format_idc == 3)
+#define CHROMA(h) ((h)->ps.sps->chroma_format_idc)
+#define CHROMA422(h) ((h)->ps.sps->chroma_format_idc == 2)
+#define CHROMA444(h) ((h)->ps.sps->chroma_format_idc == 3)
 
 #define EXTENDED_SAR 255
@@ -231,7 +232,6 @@ typedef struct SPS {
     int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8
     int residual_color_transform_flag; ///< residual_colour_transform_flag
     int constraint_set_flags; ///< constraint_set[0-3]_flag
-    int new; ///< flag to keep track if the decoder context needs re-init due to changed SPS
     uint8_t data[4096];
     size_t data_size;
 } SPS;
@@ -261,8 +261,25 @@ typedef struct PPS {
     int chroma_qp_diff;
     uint8_t data[4096];
     size_t data_size;
+
+    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
+    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
+    uint32_t(*dequant4_coeff[6])[16];
+    uint32_t(*dequant8_coeff[6])[64];
 } PPS;
 
+typedef struct H264ParamSets {
+    AVBufferRef *sps_list[MAX_SPS_COUNT];
+    AVBufferRef *pps_list[MAX_PPS_COUNT];
+
+    AVBufferRef *pps_ref;
+    AVBufferRef *sps_ref;
+    /* currently active parameters sets */
+    const PPS *pps;
+    // FIXME this should properly be const
+    SPS *sps;
+} H264ParamSets;
+
 /**
  * Frame Packing Arrangement Type
  */
@@ -572,16 +589,9 @@ typedef struct H264Context {
 
     unsigned current_sps_id; ///< id of the current SPS
 
-    SPS sps; ///< current sps
-    PPS pps; ///< current pps
-
     int au_pps_id; ///< pps_id of current access unit
 
-    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16]; // FIXME should these be moved down?
-    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
-    uint32_t(*dequant4_coeff[6])[16];
-    uint32_t(*dequant8_coeff[6])[64];
-
     uint16_t *slice_table; ///< slice_table_base + 2*mb_stride + 1
 
     // interlacing specific flags
@@ -634,10 +644,7 @@ typedef struct H264Context {
     int bit_depth_luma; ///< luma bit depth from sps to detect changes
     int chroma_format_idc; ///< chroma format from sps to detect changes
 
-    SPS *sps_buffers[MAX_SPS_COUNT];
-    PPS *pps_buffers[MAX_PPS_COUNT];
-
-    int dequant_coeff_pps; ///< reinit tables when pps changes
+    H264ParamSets ps;
 
     uint16_t *slice_table_base;
 
@@ -848,17 +855,19 @@ int ff_h264_decode_sei(H264Context *h);
 /**
  * Decode SPS
  */
-int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation);
+int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                     H264ParamSets *ps, int ignore_truncation);
 
 /**
  * compute profile from sps
 */
-int ff_h264_get_profile(SPS *sps);
+int ff_h264_get_profile(const SPS *sps);
 
 /**
 * Decode PPS
 */
-int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length);
+int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                         H264ParamSets *ps, int bit_length);
 
 /**
  * Free any data that may have been allocated in the H264 context
@@ -910,7 +919,7 @@ int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);
 void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
 
-void ff_h264_init_dequant_tables(H264Context *h);
+void ff_h264_init_dequant_tables(PPS *pps, const SPS *sps);
 
 void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
 void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
@@ -1010,7 +1019,7 @@ static av_always_inline uint16_t pack8to16(unsigned a, unsigned b)
  */
 static av_always_inline int get_chroma_qp(const H264Context *h, int t, int qscale)
 {
-    return h->pps.chroma_qp_table[t][qscale];
+    return h->ps.pps->chroma_qp_table[t][qscale];
 }
 
 /**
@@ -1133,7 +1142,7 @@ static av_always_inline void write_back_motion(const H264Context *h,
 
 static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
 {
-    if (h->sps.direct_8x8_inference_flag)
+    if (h->ps.sps->direct_8x8_inference_flag)
         return !(AV_RN64A(sl->sub_mb_type) &
                  ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
                   0x0001000100010001ULL));
@@ -1265,7 +1265,7 @@ void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
 {
     int i;
     const int8_t (*tab)[2];
-    const int slice_qp = av_clip(sl->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51);
+    const int slice_qp = av_clip(sl->qscale - 6*(h->ps.sps->bit_depth_luma-8), 0, 51);
 
     if (sl->slice_type_nos == AV_PICTURE_TYPE_I) tab = cabac_context_init_I;
     else tab = cabac_context_init_PB[sl->cabac_init_idc];
@@ -1876,7 +1876,7 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
             decode_cabac_residual_dc(h, sl, sl->mb_luma_dc[p], ctx_cat[0][p], LUMA_DC_BLOCK_INDEX+p, scan, 16);
 
         if( cbp&15 ) {
-            qmul = h->dequant4_coeff[p][qscale];
+            qmul = h->ps.pps->dequant4_coeff[p][qscale];
             for( i4x4 = 0; i4x4 < 16; i4x4++ ) {
                 const int index = 16*p + i4x4;
                 decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[1][p], index, scan + 1, qmul, 15);
@@ -1891,9 +1891,9 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
             if( IS_8x8DCT(mb_type) ) {
                 const int index = 16*p + 4*i8x8;
                 decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[3][p], index,
-                                            scan8x8, h->dequant8_coeff[cqm][qscale], 64);
+                                            scan8x8, h->ps.pps->dequant8_coeff[cqm][qscale], 64);
             } else {
-                qmul = h->dequant4_coeff[cqm][qscale];
+                qmul = h->ps.pps->dequant4_coeff[cqm][qscale];
                 for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
                     const int index = 16*p + 4*i8x8 + i4x4;
                     //START_TIMER
@@ -1914,10 +1914,11 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
  */
 int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
 {
+    const SPS *sps = h->ps.sps;
     int mb_xy;
     int mb_type, partition_count, cbp = 0;
-    int dct8x8_allowed= h->pps.transform_8x8_mode;
-    int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
+    int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
+    int decode_chroma = sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2;
     const int pixel_shift = h->pixel_shift;
 
     mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
@@ -2027,8 +2028,8 @@ decode_intra_mb:
     h->slice_table[mb_xy] = sl->slice_num;
 
     if(IS_INTRA_PCM(mb_type)) {
-        const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
-                            h->sps.bit_depth_luma >> 3;
+        const int mb_size = ff_h264_mb_sizes[sps->chroma_format_idc] *
+                            sps->bit_depth_luma >> 3;
         const uint8_t *ptr;
         int ret;
 
@@ -2215,7 +2216,7 @@ decode_intra_mb:
             ff_h264_pred_direct_motion(h, sl, &mb_type);
             fill_rectangle(sl->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 2);
             fill_rectangle(sl->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 2);
-            dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
+            dct8x8_allowed &= sps->direct_8x8_inference_flag;
         } else {
             int list, i;
             if(IS_16X16(mb_type)){
@@ -2382,7 +2383,7 @@ decode_intra_mb:
         if(get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + (sl->last_qscale_diff != 0)])){
             int val = 1;
             int ctx= 2;
-            const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8);
+            const int max_qp = 51 + 6*(sps->bit_depth_luma-8);
 
             while( get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + ctx] ) ) {
                 ctx= 3;
@@ -2425,7 +2426,7 @@ decode_intra_mb:
             int c, i, i8x8;
             for( c = 0; c < 2; c++ ) {
                 int16_t *mb = sl->mb + (16*(16 + 16*c) << pixel_shift);
-                qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
+                qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
                 for (i8x8 = 0; i8x8 < 2; i8x8++) {
                     for (i = 0; i < 4; i++) {
                         const int index = 16 + 16 * c + 8*i8x8 + i;
@@ -2449,7 +2450,7 @@ decode_intra_mb:
             if( cbp&0x20 ) {
                 int c, i;
                 for( c = 0; c < 2; c++ ) {
-                    qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
+                    qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
                     for( i = 0; i < 4; i++ ) {
                         const int index = 16 + 16 * c + i;
                         decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15);
@@ -656,7 +656,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
             for(i4x4=0; i4x4<4; i4x4++){
                 const int index= i4x4 + 4*i8x8 + p*16;
                 if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift),
-                    index, scan + 1, h->dequant4_coeff[p][qscale], 15) < 0 ){
+                    index, scan + 1, h->ps.pps->dequant4_coeff[p][qscale], 15) < 0 ){
                     return -1;
                 }
             }
@@ -678,7 +678,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
                 for(i4x4=0; i4x4<4; i4x4++){
                     const int index= i4x4 + 4*i8x8 + p*16;
                     if( decode_residual(h, sl, gb, buf, index, scan8x8+16*i4x4,
-                                        h->dequant8_coeff[cqm][qscale], 16) < 0 )
+                                        h->ps.pps->dequant8_coeff[cqm][qscale], 16) < 0 )
                         return -1;
                 }
                 nnz = &sl->non_zero_count_cache[scan8[4 * i8x8 + p * 16]];
@@ -688,7 +688,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
                 for(i4x4=0; i4x4<4; i4x4++){
                     const int index= i4x4 + 4*i8x8 + p*16;
                     if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index,
-                                        scan, h->dequant4_coeff[cqm][qscale], 16) < 0 ){
+                                        scan, h->ps.pps->dequant4_coeff[cqm][qscale], 16) < 0 ){
                         return -1;
                     }
                     new_cbp |= sl->non_zero_count_cache[scan8[index]] << i8x8;
@@ -708,8 +708,8 @@ int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
     int mb_xy;
     int partition_count;
     unsigned int mb_type, cbp;
-    int dct8x8_allowed= h->pps.transform_8x8_mode;
-    int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
+    int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
+    int decode_chroma = h->ps.sps->chroma_format_idc == 1 || h->ps.sps->chroma_format_idc == 2;
     const int pixel_shift = h->pixel_shift;
 
     mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
@@ -775,8 +775,8 @@ decode_intra_mb:
     h->slice_table[mb_xy] = sl->slice_num;
 
     if(IS_INTRA_PCM(mb_type)){
-        const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
-                            h->sps.bit_depth_luma;
+        const int mb_size = ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] *
+                            h->ps.sps->bit_depth_luma;
 
         // We assume these blocks are very rare so we do not optimize it.
         sl->intra_pcm_ptr = align_get_bits(&sl->gb);
@@ -949,7 +949,7 @@ decode_intra_mb:
             }
         }else if(IS_DIRECT(mb_type)){
             ff_h264_pred_direct_motion(h, sl, &mb_type);
-            dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
+            dct8x8_allowed &= h->ps.sps->direct_8x8_inference_flag;
         }else{
             int list, mx, my, i;
             //FIXME we should set ref_idx_l? to 0 if we use that later ...
@@ -1104,7 +1104,7 @@ decode_intra_mb:
         int ret;
         GetBitContext *gb = &sl->gb;
         const uint8_t *scan, *scan8x8;
-        const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8);
+        const int max_qp = 51 + 6 * (h->ps.sps->bit_depth_luma - 8);
 
         if(IS_INTERLACED(mb_type)){
             scan8x8 = sl->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
@@ -1142,7 +1142,7 @@ decode_intra_mb:
                     return -1;
                 }
             } else {
-                const int num_c8x8 = h->sps.chroma_format_idc;
+                const int num_c8x8 = h->ps.sps->chroma_format_idc;
 
                 if(cbp&0x30){
                     for(chroma_idx=0; chroma_idx<2; chroma_idx++)
@@ -1156,7 +1156,7 @@ decode_intra_mb:
 
                 if(cbp&0x20){
                     for(chroma_idx=0; chroma_idx<2; chroma_idx++){
-                        const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
+                        const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
                         int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift);
                         for (i8x8 = 0; i8x8<num_c8x8; i8x8++) {
                             for (i4x4 = 0; i4x4 < 4; i4x4++) {
@@ -315,7 +315,7 @@ single_col:
             *mb_type |= MB_TYPE_DIRECT2 |
                         (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
         } else {
-            if (!h->sps.direct_8x8_inference_flag) {
+            if (!h->ps.sps->direct_8x8_inference_flag) {
                 /* FIXME: Save sub mb types from previous frames (or derive
                  * from MVs) so we know exactly what block size to use. */
                 sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */
@@ -538,7 +538,7 @@ single_col:
             *mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
                         (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
         } else {
-            if (!h->sps.direct_8x8_inference_flag) {
+            if (!h->ps.sps->direct_8x8_inference_flag) {
                 /* FIXME: save sub mb types from previous frames (or derive
                  * from MVs) so we know exactly what block size to use */
                 sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
@@ -579,7 +579,7 @@ single_col:
 
     if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
         int y_shift = 2 * !IS_INTERLACED(*mb_type);
-        assert(h->sps.direct_8x8_inference_flag);
+        assert(h->ps.sps->direct_8x8_inference_flag);
 
         for (i8 = 0; i8 < 4; i8++) {
             const int x8 = i8 & 1;
@@ -250,7 +250,7 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
     int left_type = sl->left_type[LTOP];
     int top_type = sl->top_type;
 
-    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
+    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
     int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
     int b = 52 + sl->slice_beta_offset - qp_bd_offset;
 
@@ -420,7 +420,7 @@ void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl,
                             unsigned int linesize, unsigned int uvlinesize)
 {
     av_assert2(!FRAME_MBAFF(h));
-    if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
+    if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) {
         ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
         return;
     }
@@ -724,7 +724,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
     const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
     int first_vertical_edge_done = 0;
     int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
-    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
+    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
     int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
     int b = 52 + sl->slice_beta_offset - qp_bd_offset;
 
@@ -767,7 +767,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
                     bS[i] = 4;
                 else{
                     bS[i] = 1 + !!(sl->non_zero_count_cache[12+8*(i>>1)] |
-                         ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ?
+                         ((!h->ps.pps->cabac && IS_8x8DCT(mbn_type)) ?
                             (h->cbp_table[mbn_xy] & (((MB_FIELD(sl) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
                                                                        :
                             h->non_zero_count[mbn_xy][ off[i] ]));
@@ -635,7 +635,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
             for (i = 0; i < 16; i += 4) {
                 uint8_t *const ptr = dest_y + block_offset[i];
                 const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
-                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
+                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                     if (h->x264_build != -1) {
                         h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                     } else
@@ -666,7 +666,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
                 uint8_t *const ptr = dest_y + block_offset[i];
                 const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
 
-                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
+                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                     h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                 } else {
                     uint8_t *topright;
@@ -705,7 +705,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
             if (!transform_bypass)
                 h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
                                                      sl->mb_luma_dc[p],
-                                                     h->dequant4_coeff[p][qscale][0]);
+                                                     h->ps.pps->dequant4_coeff[p][qscale][0]);
             else {
                 static const uint8_t dc_mapping[16] = {
                     0 * 16, 1 * 16, 4 * 16, 5 * 16,
@@ -737,7 +737,7 @@ static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264Sl
     if (!IS_INTRA4x4(mb_type)) {
         if (IS_INTRA16x16(mb_type)) {
            if (transform_bypass) {
-                if (h->sps.profile_idc == 244 &&
+                if (h->ps.sps->profile_idc == 244 &&
                     (sl->intra16x16_pred_mode == VERT_PRED8x8 ||
                      sl->intra16x16_pred_mode == HOR_PRED8x8)) {
                     h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
@@ -48,7 +48,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
     int linesize, uvlinesize /*dct_offset*/;
     int i, j;
     const int *block_offset = &h->block_offset[0];
-    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
+    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
     void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
     const int block_h = 16 >> h->chroma_y_shift;
     const int chroma422 = CHROMA422(h);
@@ -96,12 +96,12 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
     }
 
     if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
-        const int bit_depth = h->sps.bit_depth_luma;
+        const int bit_depth = h->ps.sps->bit_depth_luma;
         if (PIXEL_SHIFT) {
            int j;
            GetBitContext gb;
            init_get_bits(&gb, sl->intra_pcm_ptr,
-                          ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth);
+                          ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] * bit_depth);
 
            for (i = 0; i < 16; i++) {
                uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
@@ -109,7 +109,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
                    tmp_y[j] = get_bits(&gb, bit_depth);
            }
            if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
-                if (!h->sps.chroma_format_idc) {
+                if (!h->ps.sps->chroma_format_idc) {
                    for (i = 0; i < block_h; i++) {
                        uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
                        uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
@@ -134,7 +134,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
            for (i = 0; i < 16; i++)
                memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
            if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
-                if (!h->sps.chroma_format_idc) {
+                if (!h->ps.sps->chroma_format_idc) {
                    for (i = 0; i < 8; i++) {
                        memset(dest_cb + i * uvlinesize, 1 << (bit_depth - 1), 8);
                        memset(dest_cr + i * uvlinesize, 1 << (bit_depth - 1), 8);
@@ -190,7 +190,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
                 (sl->cbp & 0x30)) {
                 uint8_t *dest[2] = { dest_cb, dest_cr };
                 if (transform_bypass) {
-                    if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 &&
+                    if (IS_INTRA(mb_type) && h->ps.sps->profile_idc == 244 &&
                         (sl->chroma_pred_mode == VERT_PRED8x8 ||
                          sl->chroma_pred_mode == HOR_PRED8x8)) {
                         h->hpc.pred8x8_add[sl->chroma_pred_mode](dest[0],
@@ -231,10 +231,10 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
                         }
                         if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
                             h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 1 << PIXEL_SHIFT),
-                                                                   h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
+                                                                   h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
                         if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
                             h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 2 << PIXEL_SHIFT),
-                                                                   h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
+                                                                   h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
                         h->h264dsp.h264_idct_add8(dest, block_offset,
                                                   sl->mb, uvlinesize,
                                                   sl->non_zero_count_cache);
@@ -259,7 +259,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
     int linesize;
     int i, j, p;
     const int *block_offset = &h->block_offset[0];
-    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
+    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
     const int plane_count = (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) ? 3 : 1;
 
     for (p = 0; p < plane_count; p++) {
@@ -301,7 +301,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
 
     if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
         if (PIXEL_SHIFT) {
-            const int bit_depth = h->sps.bit_depth_luma;
+            const int bit_depth = h->ps.sps->bit_depth_luma;
             GetBitContext gb;
             init_get_bits(&gb, sl->intra_pcm_ptr, 768 * bit_depth);
 
@@ -464,7 +464,7 @@ static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int m | |||||
if (!IS_SKIP(mb_type)) { | if (!IS_SKIP(mb_type)) { | ||||
if (IS_INTRA(mb_type)) { | if (IS_INTRA(mb_type)) { | ||||
int type_mask = h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1; | |||||
int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1; | |||||
sl->topleft_samples_available = | sl->topleft_samples_available = | ||||
sl->top_samples_available = | sl->top_samples_available = | ||||
sl->left_samples_available = 0xFFFF; | sl->left_samples_available = 0xFFFF; | ||||
@@ -47,6 +47,7 @@ | |||||
typedef struct H264ParseContext { | typedef struct H264ParseContext { | ||||
H264Context h; | H264Context h; | ||||
ParseContext pc; | ParseContext pc; | ||||
H264ParamSets ps; | |||||
int got_first; | int got_first; | ||||
} H264ParseContext; | } H264ParseContext; | ||||
@@ -148,13 +149,13 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb) | |||||
int list_count, ref_count[2]; | int list_count, ref_count[2]; | ||||
if (h->pps.redundant_pic_cnt_present) | |||||
if (p->ps.pps->redundant_pic_cnt_present) | |||||
get_ue_golomb(gb); // redundant_pic_count | get_ue_golomb(gb); // redundant_pic_count | ||||
if (slice_type_nos == AV_PICTURE_TYPE_B) | if (slice_type_nos == AV_PICTURE_TYPE_B) | ||||
get_bits1(gb); // direct_spatial_mv_pred | get_bits1(gb); // direct_spatial_mv_pred | ||||
if (ff_h264_parse_ref_count(&list_count, ref_count, gb, &h->pps, | |||||
if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps, | |||||
slice_type_nos, h->picture_structure, h->avctx) < 0) | slice_type_nos, h->picture_structure, h->avctx) < 0) | ||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
@@ -186,9 +187,9 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb) | |||||
} | } | ||||
} | } | ||||
if ((h->pps.weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) || | |||||
(h->pps.weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B)) | |||||
ff_h264_pred_weight_table(gb, &h->sps, ref_count, slice_type_nos, | |||||
if ((p->ps.pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) || | |||||
(p->ps.pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B)) | |||||
ff_h264_pred_weight_table(gb, p->ps.sps, ref_count, slice_type_nos, | |||||
&pwt); | &pwt); | ||||
if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag | if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag | ||||
@@ -255,6 +256,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, | |||||
buf_index = 0; | buf_index = 0; | ||||
next_avc = h->is_avc ? 0 : buf_size; | next_avc = h->is_avc ? 0 : buf_size; | ||||
for (;;) { | for (;;) { | ||||
const SPS *sps; | |||||
int src_length, consumed, nalsize = 0; | int src_length, consumed, nalsize = 0; | ||||
if (buf_index >= next_avc) { | if (buf_index >= next_avc) { | ||||
@@ -307,13 +309,19 @@ static inline int parse_nal_units(AVCodecParserContext *s, | |||||
switch (h->nal_unit_type) { | switch (h->nal_unit_type) { | ||||
case NAL_SPS: | case NAL_SPS: | ||||
ff_h264_decode_seq_parameter_set(h, 0); | |||||
ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps, 0); | |||||
break; | break; | ||||
case NAL_PPS: | case NAL_PPS: | ||||
ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits); | |||||
ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps, | |||||
nal.size_bits); | |||||
break; | break; | ||||
case NAL_SEI: | case NAL_SEI: | ||||
ff_h264_decode_sei(h); | |||||
{ | |||||
H264ParamSets ps = h->ps; | |||||
h->ps = p->ps; | |||||
ff_h264_decode_sei(h); | |||||
h->ps = ps; | |||||
} | |||||
break; | break; | ||||
case NAL_IDR_SLICE: | case NAL_IDR_SLICE: | ||||
s->key_frame = 1; | s->key_frame = 1; | ||||
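Note on the NAL_SEI branch above: ff_h264_decode_sei() still reads parameter sets from the embedded H264Context, so the parser temporarily installs its own H264ParamSets, runs the SEI decode, and restores the previous pointers afterwards. A minimal sketch of that swap-restore idiom, using simplified stand-in types rather than the real FFmpeg structs:

    /* swap-restore sketch: expose the caller's parameter sets to a callee that
     * still reads them from the shared context, then put the old ones back so
     * the context is left exactly as it was found */
    struct mini_ps  { const void *sps, *pps; };
    struct mini_ctx { struct mini_ps ps; };

    static void run_with_parser_ps(struct mini_ctx *h,
                                   const struct mini_ps *parser_ps,
                                   void (*decode_sei)(struct mini_ctx *))
    {
        struct mini_ps saved = h->ps;   /* remember the context's own sets */
        h->ps = *parser_ps;             /* callee now sees the parser's sets */
        decode_sei(h);
        h->ps = saved;                  /* restore on the way out */
    }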
@@ -337,33 +345,39 @@ static inline int parse_nal_units(AVCodecParserContext *s, | |||||
"pps_id %u out of range\n", pps_id); | "pps_id %u out of range\n", pps_id); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
if (!h->pps_buffers[pps_id]) { | |||||
if (!p->ps.pps_list[pps_id]) { | |||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"non-existing PPS %u referenced\n", pps_id); | "non-existing PPS %u referenced\n", pps_id); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
h->pps = *h->pps_buffers[pps_id]; | |||||
if (!h->sps_buffers[h->pps.sps_id]) { | |||||
p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data; | |||||
if (!p->ps.sps_list[p->ps.pps->sps_id]) { | |||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"non-existing SPS %u referenced\n", h->pps.sps_id); | |||||
"non-existing SPS %u referenced\n", p->ps.pps->sps_id); | |||||
goto fail; | goto fail; | ||||
} | } | ||||
h->sps = *h->sps_buffers[h->pps.sps_id]; | |||||
h->frame_num = get_bits(&nal.gb, h->sps.log2_max_frame_num); | |||||
p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data; | |||||
h->ps.sps = p->ps.sps; | |||||
h->ps.pps = p->ps.pps; | |||||
sps = p->ps.sps; | |||||
if(h->sps.ref_frame_count <= 1 && h->pps.ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I) | |||||
// heuristic to detect non marked keyframes | |||||
if (h->ps.sps->ref_frame_count <= 1 && h->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I) | |||||
s->key_frame = 1; | s->key_frame = 1; | ||||
s->coded_width = 16 * h->sps.mb_width; | |||||
s->coded_height = 16 * h->sps.mb_height; | |||||
s->width = s->coded_width - (h->sps.crop_right + h->sps.crop_left); | |||||
s->height = s->coded_height - (h->sps.crop_top + h->sps.crop_bottom); | |||||
h->frame_num = get_bits(&nal.gb, sps->log2_max_frame_num); | |||||
s->coded_width = 16 * sps->mb_width; | |||||
s->coded_height = 16 * sps->mb_height; | |||||
s->width = s->coded_width - (sps->crop_right + sps->crop_left); | |||||
s->height = s->coded_height - (sps->crop_top + sps->crop_bottom); | |||||
if (s->width <= 0 || s->height <= 0) { | if (s->width <= 0 || s->height <= 0) { | ||||
s->width = s->coded_width; | s->width = s->coded_width; | ||||
s->height = s->coded_height; | s->height = s->coded_height; | ||||
} | } | ||||
switch (h->sps.bit_depth_luma) { | |||||
switch (sps->bit_depth_luma) { | |||||
case 9: | case 9: | ||||
if (CHROMA444(h)) s->format = AV_PIX_FMT_YUV444P9; | if (CHROMA444(h)) s->format = AV_PIX_FMT_YUV444P9; | ||||
else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9; | else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9; | ||||
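Worth noting in the hunk above: once the slice header names a pps_id, the parser now resolves both parameter sets from its own refcounted lists (p->ps.pps_list / p->ps.sps_list), caches the raw pointers in p->ps.pps / p->ps.sps, and mirrors them into h->ps so code that still goes through the H264Context sees the same sets. A rough sketch of that lookup, with a cut-down PPS stand-in (activate_ps() is hypothetical, not an FFmpeg function):

    #include "libavutil/buffer.h"

    typedef struct MiniPPS { unsigned sps_id; } MiniPPS;   /* stand-in for PPS */

    /* returns the parsed PPS behind pps_list[pps_id], or NULL if it (or the
     * SPS it references) was never decoded -- the two error paths above */
    static const MiniPPS *activate_ps(AVBufferRef * const pps_list[],
                                      AVBufferRef * const sps_list[],
                                      unsigned pps_id)
    {
        const MiniPPS *pps;
        if (!pps_list[pps_id])
            return NULL;                        /* non-existing PPS referenced */
        pps = (const MiniPPS *)pps_list[pps_id]->data;
        if (!sps_list[pps->sps_id])
            return NULL;                        /* non-existing SPS referenced */
        return pps;
    }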
@@ -383,10 +397,10 @@ static inline int parse_nal_units(AVCodecParserContext *s, | |||||
s->format = AV_PIX_FMT_NONE; | s->format = AV_PIX_FMT_NONE; | ||||
} | } | ||||
avctx->profile = ff_h264_get_profile(&h->sps); | |||||
avctx->level = h->sps.level_idc; | |||||
avctx->profile = ff_h264_get_profile(sps); | |||||
avctx->level = sps->level_idc; | |||||
if (h->sps.frame_mbs_only_flag) { | |||||
if (sps->frame_mbs_only_flag) { | |||||
h->picture_structure = PICT_FRAME; | h->picture_structure = PICT_FRAME; | ||||
} else { | } else { | ||||
if (get_bits1(&nal.gb)) { // field_pic_flag | if (get_bits1(&nal.gb)) { // field_pic_flag | ||||
@@ -398,19 +412,19 @@ static inline int parse_nal_units(AVCodecParserContext *s, | |||||
if (h->nal_unit_type == NAL_IDR_SLICE) | if (h->nal_unit_type == NAL_IDR_SLICE) | ||||
get_ue_golomb_long(&nal.gb); /* idr_pic_id */ | get_ue_golomb_long(&nal.gb); /* idr_pic_id */ | ||||
if (h->sps.poc_type == 0) { | |||||
h->poc_lsb = get_bits(&nal.gb, h->sps.log2_max_poc_lsb); | |||||
if (sps->poc_type == 0) { | |||||
h->poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb); | |||||
if (h->pps.pic_order_present == 1 && | |||||
if (p->ps.pps->pic_order_present == 1 && | |||||
h->picture_structure == PICT_FRAME) | h->picture_structure == PICT_FRAME) | ||||
h->delta_poc_bottom = get_se_golomb(&nal.gb); | h->delta_poc_bottom = get_se_golomb(&nal.gb); | ||||
} | } | ||||
if (h->sps.poc_type == 1 && | |||||
!h->sps.delta_pic_order_always_zero_flag) { | |||||
if (sps->poc_type == 1 && | |||||
!sps->delta_pic_order_always_zero_flag) { | |||||
h->delta_poc[0] = get_se_golomb(&nal.gb); | h->delta_poc[0] = get_se_golomb(&nal.gb); | ||||
if (h->pps.pic_order_present == 1 && | |||||
if (p->ps.pps->pic_order_present == 1 && | |||||
h->picture_structure == PICT_FRAME) | h->picture_structure == PICT_FRAME) | ||||
h->delta_poc[1] = get_se_golomb(&nal.gb); | h->delta_poc[1] = get_se_golomb(&nal.gb); | ||||
} | } | ||||
@@ -444,7 +458,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, | |||||
} | } | ||||
} | } | ||||
if (h->sps.pic_struct_present_flag) { | |||||
if (sps->pic_struct_present_flag) { | |||||
switch (h->sei_pic_struct) { | switch (h->sei_pic_struct) { | ||||
case SEI_PIC_STRUCT_TOP_FIELD: | case SEI_PIC_STRUCT_TOP_FIELD: | ||||
case SEI_PIC_STRUCT_BOTTOM_FIELD: | case SEI_PIC_STRUCT_BOTTOM_FIELD: | ||||
@@ -475,7 +489,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, | |||||
if (h->picture_structure == PICT_FRAME) { | if (h->picture_structure == PICT_FRAME) { | ||||
s->picture_structure = AV_PICTURE_STRUCTURE_FRAME; | s->picture_structure = AV_PICTURE_STRUCTURE_FRAME; | ||||
if (h->sps.pic_struct_present_flag) { | |||||
if (sps->pic_struct_present_flag) { | |||||
switch (h->sei_pic_struct) { | switch (h->sei_pic_struct) { | ||||
case SEI_PIC_STRUCT_TOP_BOTTOM: | case SEI_PIC_STRUCT_TOP_BOTTOM: | ||||
case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: | case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: | ||||
@@ -533,6 +547,8 @@ static int h264_parse(AVCodecParserContext *s, | |||||
if (!p->got_first) { | if (!p->got_first) { | ||||
p->got_first = 1; | p->got_first = 1; | ||||
if (avctx->extradata_size) { | if (avctx->extradata_size) { | ||||
int i; | |||||
h->avctx = avctx; | h->avctx = avctx; | ||||
// must be done like in decoder, otherwise opening the parser, | // must be done like in decoder, otherwise opening the parser, | ||||
// letting it create extradata and then closing and opening again | // letting it create extradata and then closing and opening again | ||||
@@ -541,6 +557,25 @@ static int h264_parse(AVCodecParserContext *s, | |||||
if (!avctx->has_b_frames) | if (!avctx->has_b_frames) | ||||
h->low_delay = 1; | h->low_delay = 1; | ||||
ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size); | ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size); | ||||
for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++) { | |||||
av_buffer_unref(&p->ps.sps_list[i]); | |||||
if (h->ps.sps_list[i]) { | |||||
p->ps.sps_list[i] = av_buffer_ref(h->ps.sps_list[i]); | |||||
if (!p->ps.sps_list[i]) | |||||
return AVERROR(ENOMEM); | |||||
} | |||||
} | |||||
for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++) { | |||||
av_buffer_unref(&p->ps.pps_list[i]); | |||||
if (h->ps.pps_list[i]) { | |||||
p->ps.pps_list[i] = av_buffer_ref(h->ps.pps_list[i]); | |||||
if (!p->ps.pps_list[i]) | |||||
return AVERROR(ENOMEM); | |||||
} | |||||
} | |||||
p->ps.sps = h->ps.sps; | |||||
} | } | ||||
} | } | ||||
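After ff_h264_decode_extradata() fills the embedded context, the two loops above give the parser its own references to every SPS/PPS buffer, so the lists survive independently of the decoder context; the same pattern appears in reverse in h264_close() below, where the parser drops those references again. A condensed sketch of the copy loop (copy_ps_list() is a hypothetical helper, not an FFmpeg API):

    #include "libavutil/buffer.h"
    #include "libavutil/error.h"

    static int copy_ps_list(AVBufferRef *dst[], AVBufferRef * const src[], int n)
    {
        for (int i = 0; i < n; i++) {
            av_buffer_unref(&dst[i]);           /* drop any previous entry */
            if (!src[i])
                continue;
            dst[i] = av_buffer_ref(src[i]);     /* new reference, shared payload */
            if (!dst[i])
                return AVERROR(ENOMEM);
        }
        return 0;
    }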
@@ -626,9 +661,16 @@ static void h264_close(AVCodecParserContext *s) | |||||
H264ParseContext *p = s->priv_data; | H264ParseContext *p = s->priv_data; | ||||
H264Context *h = &p->h; | H264Context *h = &p->h; | ||||
ParseContext *pc = &p->pc; | ParseContext *pc = &p->pc; | ||||
int i; | |||||
av_freep(&pc->buffer); | av_freep(&pc->buffer); | ||||
ff_h264_free_context(h); | ff_h264_free_context(h); | ||||
for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++) | |||||
av_buffer_unref(&p->ps.sps_list[i]); | |||||
for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++) | |||||
av_buffer_unref(&p->ps.pps_list[i]); | |||||
} | } | ||||
static av_cold int init(AVCodecParserContext *s) | static av_cold int init(AVCodecParserContext *s) | ||||
@@ -84,47 +84,68 @@ static const int level_max_dpb_mbs[][2] = { | |||||
{ 52, 184320 }, | { 52, 184320 }, | ||||
}; | }; | ||||
static inline int decode_hrd_parameters(H264Context *h, SPS *sps) | |||||
static void remove_pps(H264ParamSets *s, int id) | |||||
{ | |||||
av_buffer_unref(&s->pps_list[id]); | |||||
} | |||||
static void remove_sps(H264ParamSets *s, int id) | |||||
{ | |||||
#if 0 | |||||
int i; | |||||
if (s->sps_list[id]) { | |||||
/* drop all PPS that depend on this SPS */ | |||||
for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++) | |||||
if (s->pps_list[i] && ((PPS*)s->pps_list[i]->data)->sps_id == id) | |||||
remove_pps(s, i); | |||||
} | |||||
#endif | |||||
av_buffer_unref(&s->sps_list[id]); | |||||
} | |||||
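As the merge notes explain, the dependent-PPS purge in remove_sps() is deliberately compiled out: with parameter sets held in refcounted AVBufferRefs, unreferencing a list slot only frees the set once no other holder (the active sps/pps pointers, a decoding thread, the parser) still has a reference. A small illustration of that lifetime rule, using the real AVBufferRef API with hypothetical variable names:

    #include "libavutil/buffer.h"

    static void replace_list_entry(AVBufferRef *sps_list[], int id,
                                   AVBufferRef **active_sps_ref)
    {
        /* keep an independent reference to the currently active SPS ... */
        if (sps_list[id] && !*active_sps_ref)
            *active_sps_ref = av_buffer_ref(sps_list[id]);
        /* ... so dropping the list slot does not invalidate it; the payload
         * is only freed when the last reference goes away */
        av_buffer_unref(&sps_list[id]);
    }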
static inline int decode_hrd_parameters(GetBitContext *gb, AVCodecContext *avctx, | |||||
SPS *sps) | |||||
{ | { | ||||
int cpb_count, i; | int cpb_count, i; | ||||
cpb_count = get_ue_golomb_31(&h->gb) + 1; | |||||
cpb_count = get_ue_golomb_31(gb) + 1; | |||||
if (cpb_count > 32U) { | if (cpb_count > 32U) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count); | |||||
av_log(avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count); | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
get_bits(&h->gb, 4); /* bit_rate_scale */ | |||||
get_bits(&h->gb, 4); /* cpb_size_scale */ | |||||
get_bits(gb, 4); /* bit_rate_scale */ | |||||
get_bits(gb, 4); /* cpb_size_scale */ | |||||
for (i = 0; i < cpb_count; i++) { | for (i = 0; i < cpb_count; i++) { | ||||
get_ue_golomb_long(&h->gb); /* bit_rate_value_minus1 */ | |||||
get_ue_golomb_long(&h->gb); /* cpb_size_value_minus1 */ | |||||
get_bits1(&h->gb); /* cbr_flag */ | |||||
} | |||||
sps->initial_cpb_removal_delay_length = get_bits(&h->gb, 5) + 1; | |||||
sps->cpb_removal_delay_length = get_bits(&h->gb, 5) + 1; | |||||
sps->dpb_output_delay_length = get_bits(&h->gb, 5) + 1; | |||||
sps->time_offset_length = get_bits(&h->gb, 5); | |||||
get_ue_golomb_long(gb); /* bit_rate_value_minus1 */ | |||||
get_ue_golomb_long(gb); /* cpb_size_value_minus1 */ | |||||
get_bits1(gb); /* cbr_flag */ | |||||
} | |||||
sps->initial_cpb_removal_delay_length = get_bits(gb, 5) + 1; | |||||
sps->cpb_removal_delay_length = get_bits(gb, 5) + 1; | |||||
sps->dpb_output_delay_length = get_bits(gb, 5) + 1; | |||||
sps->time_offset_length = get_bits(gb, 5); | |||||
sps->cpb_cnt = cpb_count; | sps->cpb_cnt = cpb_count; | ||||
return 0; | return 0; | ||||
} | } | ||||
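decode_hrd_parameters() above shows the signature change applied to every helper in this file: instead of an H264Context, the functions take the GetBitContext to read from plus an AVCodecContext (or generic log context) for diagnostics, which is what lets the parser call them without a full decoder. A tiny sketch of the shape of such a helper (parse_flag_example() is made up for illustration):

    #include "libavutil/log.h"
    #include "get_bits.h"

    static int parse_flag_example(GetBitContext *gb, void *logctx)
    {
        int flag = get_bits1(gb);                        /* caller-owned reader */
        av_log(logctx, AV_LOG_DEBUG, "flag=%d\n", flag); /* caller-owned logging */
        return flag;
    }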
static inline int decode_vui_parameters(H264Context *h, SPS *sps) | |||||
static inline int decode_vui_parameters(GetBitContext *gb, AVCodecContext *avctx, | |||||
SPS *sps) | |||||
{ | { | ||||
int aspect_ratio_info_present_flag; | int aspect_ratio_info_present_flag; | ||||
unsigned int aspect_ratio_idc; | unsigned int aspect_ratio_idc; | ||||
aspect_ratio_info_present_flag = get_bits1(&h->gb); | |||||
aspect_ratio_info_present_flag = get_bits1(gb); | |||||
if (aspect_ratio_info_present_flag) { | if (aspect_ratio_info_present_flag) { | ||||
aspect_ratio_idc = get_bits(&h->gb, 8); | |||||
aspect_ratio_idc = get_bits(gb, 8); | |||||
if (aspect_ratio_idc == EXTENDED_SAR) { | if (aspect_ratio_idc == EXTENDED_SAR) { | ||||
sps->sar.num = get_bits(&h->gb, 16); | |||||
sps->sar.den = get_bits(&h->gb, 16); | |||||
sps->sar.num = get_bits(gb, 16); | |||||
sps->sar.den = get_bits(gb, 16); | |||||
} else if (aspect_ratio_idc < FF_ARRAY_ELEMS(ff_h264_pixel_aspect)) { | } else if (aspect_ratio_idc < FF_ARRAY_ELEMS(ff_h264_pixel_aspect)) { | ||||
sps->sar = ff_h264_pixel_aspect[aspect_ratio_idc]; | sps->sar = ff_h264_pixel_aspect[aspect_ratio_idc]; | ||||
} else { | } else { | ||||
av_log(h->avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); | |||||
av_log(avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
} else { | } else { | ||||
@@ -132,19 +153,19 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps) | |||||
sps->sar.den = 0; | sps->sar.den = 0; | ||||
} | } | ||||
if (get_bits1(&h->gb)) /* overscan_info_present_flag */ | |||||
get_bits1(&h->gb); /* overscan_appropriate_flag */ | |||||
if (get_bits1(gb)) /* overscan_info_present_flag */ | |||||
get_bits1(gb); /* overscan_appropriate_flag */ | |||||
sps->video_signal_type_present_flag = get_bits1(&h->gb); | |||||
sps->video_signal_type_present_flag = get_bits1(gb); | |||||
if (sps->video_signal_type_present_flag) { | if (sps->video_signal_type_present_flag) { | ||||
get_bits(&h->gb, 3); /* video_format */ | |||||
sps->full_range = get_bits1(&h->gb); /* video_full_range_flag */ | |||||
get_bits(gb, 3); /* video_format */ | |||||
sps->full_range = get_bits1(gb); /* video_full_range_flag */ | |||||
sps->colour_description_present_flag = get_bits1(&h->gb); | |||||
sps->colour_description_present_flag = get_bits1(gb); | |||||
if (sps->colour_description_present_flag) { | if (sps->colour_description_present_flag) { | ||||
sps->color_primaries = get_bits(&h->gb, 8); /* colour_primaries */ | |||||
sps->color_trc = get_bits(&h->gb, 8); /* transfer_characteristics */ | |||||
sps->colorspace = get_bits(&h->gb, 8); /* matrix_coefficients */ | |||||
sps->color_primaries = get_bits(gb, 8); /* colour_primaries */ | |||||
sps->color_trc = get_bits(gb, 8); /* transfer_characteristics */ | |||||
sps->colorspace = get_bits(gb, 8); /* matrix_coefficients */ | |||||
if (sps->color_primaries >= AVCOL_PRI_NB) | if (sps->color_primaries >= AVCOL_PRI_NB) | ||||
sps->color_primaries = AVCOL_PRI_UNSPECIFIED; | sps->color_primaries = AVCOL_PRI_UNSPECIFIED; | ||||
if (sps->color_trc >= AVCOL_TRC_NB) | if (sps->color_trc >= AVCOL_TRC_NB) | ||||
@@ -155,23 +176,23 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps) | |||||
} | } | ||||
/* chroma_location_info_present_flag */ | /* chroma_location_info_present_flag */ | ||||
if (get_bits1(&h->gb)) { | |||||
if (get_bits1(gb)) { | |||||
/* chroma_sample_location_type_top_field */ | /* chroma_sample_location_type_top_field */ | ||||
h->avctx->chroma_sample_location = get_ue_golomb(&h->gb) + 1; | |||||
get_ue_golomb(&h->gb); /* chroma_sample_location_type_bottom_field */ | |||||
avctx->chroma_sample_location = get_ue_golomb(gb) + 1; | |||||
get_ue_golomb(gb); /* chroma_sample_location_type_bottom_field */ | |||||
} | } | ||||
if (show_bits1(&h->gb) && get_bits_left(&h->gb) < 10) { | |||||
av_log(h->avctx, AV_LOG_WARNING, "Truncated VUI\n"); | |||||
if (show_bits1(gb) && get_bits_left(gb) < 10) { | |||||
av_log(avctx, AV_LOG_WARNING, "Truncated VUI\n"); | |||||
return 0; | return 0; | ||||
} | } | ||||
sps->timing_info_present_flag = get_bits1(&h->gb); | |||||
sps->timing_info_present_flag = get_bits1(gb); | |||||
if (sps->timing_info_present_flag) { | if (sps->timing_info_present_flag) { | ||||
unsigned num_units_in_tick = get_bits_long(&h->gb, 32); | |||||
unsigned time_scale = get_bits_long(&h->gb, 32); | |||||
unsigned num_units_in_tick = get_bits_long(gb, 32); | |||||
unsigned time_scale = get_bits_long(gb, 32); | |||||
if (!num_units_in_tick || !time_scale) { | if (!num_units_in_tick || !time_scale) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n", | "time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n", | ||||
time_scale, num_units_in_tick); | time_scale, num_units_in_tick); | ||||
sps->timing_info_present_flag = 0; | sps->timing_info_present_flag = 0; | ||||
@@ -179,41 +200,41 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps) | |||||
sps->num_units_in_tick = num_units_in_tick; | sps->num_units_in_tick = num_units_in_tick; | ||||
sps->time_scale = time_scale; | sps->time_scale = time_scale; | ||||
} | } | ||||
sps->fixed_frame_rate_flag = get_bits1(&h->gb); | |||||
sps->fixed_frame_rate_flag = get_bits1(gb); | |||||
} | } | ||||
sps->nal_hrd_parameters_present_flag = get_bits1(&h->gb); | |||||
sps->nal_hrd_parameters_present_flag = get_bits1(gb); | |||||
if (sps->nal_hrd_parameters_present_flag) | if (sps->nal_hrd_parameters_present_flag) | ||||
if (decode_hrd_parameters(h, sps) < 0) | |||||
if (decode_hrd_parameters(gb, avctx, sps) < 0) | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
sps->vcl_hrd_parameters_present_flag = get_bits1(&h->gb); | |||||
sps->vcl_hrd_parameters_present_flag = get_bits1(gb); | |||||
if (sps->vcl_hrd_parameters_present_flag) | if (sps->vcl_hrd_parameters_present_flag) | ||||
if (decode_hrd_parameters(h, sps) < 0) | |||||
if (decode_hrd_parameters(gb, avctx, sps) < 0) | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
if (sps->nal_hrd_parameters_present_flag || | if (sps->nal_hrd_parameters_present_flag || | ||||
sps->vcl_hrd_parameters_present_flag) | sps->vcl_hrd_parameters_present_flag) | ||||
get_bits1(&h->gb); /* low_delay_hrd_flag */ | |||||
sps->pic_struct_present_flag = get_bits1(&h->gb); | |||||
if (!get_bits_left(&h->gb)) | |||||
get_bits1(gb); /* low_delay_hrd_flag */ | |||||
sps->pic_struct_present_flag = get_bits1(gb); | |||||
if (!get_bits_left(gb)) | |||||
return 0; | return 0; | ||||
sps->bitstream_restriction_flag = get_bits1(&h->gb); | |||||
sps->bitstream_restriction_flag = get_bits1(gb); | |||||
if (sps->bitstream_restriction_flag) { | if (sps->bitstream_restriction_flag) { | ||||
get_bits1(&h->gb); /* motion_vectors_over_pic_boundaries_flag */ | |||||
get_ue_golomb(&h->gb); /* max_bytes_per_pic_denom */ | |||||
get_ue_golomb(&h->gb); /* max_bits_per_mb_denom */ | |||||
get_ue_golomb(&h->gb); /* log2_max_mv_length_horizontal */ | |||||
get_ue_golomb(&h->gb); /* log2_max_mv_length_vertical */ | |||||
sps->num_reorder_frames = get_ue_golomb(&h->gb); | |||||
get_ue_golomb(&h->gb); /*max_dec_frame_buffering*/ | |||||
if (get_bits_left(&h->gb) < 0) { | |||||
get_bits1(gb); /* motion_vectors_over_pic_boundaries_flag */ | |||||
get_ue_golomb(gb); /* max_bytes_per_pic_denom */ | |||||
get_ue_golomb(gb); /* max_bits_per_mb_denom */ | |||||
get_ue_golomb(gb); /* log2_max_mv_length_horizontal */ | |||||
get_ue_golomb(gb); /* log2_max_mv_length_vertical */ | |||||
sps->num_reorder_frames = get_ue_golomb(gb); | |||||
get_ue_golomb(gb); /*max_dec_frame_buffering*/ | |||||
if (get_bits_left(gb) < 0) { | |||||
sps->num_reorder_frames = 0; | sps->num_reorder_frames = 0; | ||||
sps->bitstream_restriction_flag = 0; | sps->bitstream_restriction_flag = 0; | ||||
} | } | ||||
if (sps->num_reorder_frames > 16U | if (sps->num_reorder_frames > 16U | ||||
/* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) { | /* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"Clipping illegal num_reorder_frames %d\n", | "Clipping illegal num_reorder_frames %d\n", | ||||
sps->num_reorder_frames); | sps->num_reorder_frames); | ||||
sps->num_reorder_frames = 16; | sps->num_reorder_frames = 16; | ||||
@@ -224,18 +245,18 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps) | |||||
return 0; | return 0; | ||||
} | } | ||||
static void decode_scaling_list(H264Context *h, uint8_t *factors, int size, | |||||
static void decode_scaling_list(GetBitContext *gb, uint8_t *factors, int size, | |||||
const uint8_t *jvt_list, | const uint8_t *jvt_list, | ||||
const uint8_t *fallback_list) | const uint8_t *fallback_list) | ||||
{ | { | ||||
int i, last = 8, next = 8; | int i, last = 8, next = 8; | ||||
const uint8_t *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct; | const uint8_t *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct; | ||||
if (!get_bits1(&h->gb)) /* matrix not written, we use the predicted one */ | |||||
if (!get_bits1(gb)) /* matrix not written, we use the predicted one */ | |||||
memcpy(factors, fallback_list, size * sizeof(uint8_t)); | memcpy(factors, fallback_list, size * sizeof(uint8_t)); | ||||
else | else | ||||
for (i = 0; i < size; i++) { | for (i = 0; i < size; i++) { | ||||
if (next) | if (next) | ||||
next = (last + get_se_golomb(&h->gb)) & 0xff; | |||||
next = (last + get_se_golomb(gb)) & 0xff; | |||||
if (!i && !next) { /* matrix not written, we use the preset one */ | if (!i && !next) { /* matrix not written, we use the preset one */ | ||||
memcpy(factors, jvt_list, size * sizeof(uint8_t)); | memcpy(factors, jvt_list, size * sizeof(uint8_t)); | ||||
break; | break; | ||||
@@ -244,7 +265,7 @@ static void decode_scaling_list(H264Context *h, uint8_t *factors, int size, | |||||
} | } | ||||
} | } | ||||
static void decode_scaling_matrices(H264Context *h, SPS *sps, | |||||
static void decode_scaling_matrices(GetBitContext *gb, SPS *sps, | |||||
PPS *pps, int is_sps, | PPS *pps, int is_sps, | ||||
uint8_t(*scaling_matrix4)[16], | uint8_t(*scaling_matrix4)[16], | ||||
uint8_t(*scaling_matrix8)[64]) | uint8_t(*scaling_matrix8)[64]) | ||||
@@ -256,58 +277,61 @@ static void decode_scaling_matrices(H264Context *h, SPS *sps, | |||||
fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0], | fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0], | ||||
fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1] | fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1] | ||||
}; | }; | ||||
if (get_bits1(&h->gb)) { | |||||
if (get_bits1(gb)) { | |||||
sps->scaling_matrix_present |= is_sps; | sps->scaling_matrix_present |= is_sps; | ||||
decode_scaling_list(h, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]); // Intra, Y | |||||
decode_scaling_list(h, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr | |||||
decode_scaling_list(h, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb | |||||
decode_scaling_list(h, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]); // Inter, Y | |||||
decode_scaling_list(h, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr | |||||
decode_scaling_list(h, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb | |||||
decode_scaling_list(gb, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]); // Intra, Y | |||||
decode_scaling_list(gb, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr | |||||
decode_scaling_list(gb, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb | |||||
decode_scaling_list(gb, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]); // Inter, Y | |||||
decode_scaling_list(gb, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr | |||||
decode_scaling_list(gb, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb | |||||
if (is_sps || pps->transform_8x8_mode) { | if (is_sps || pps->transform_8x8_mode) { | ||||
decode_scaling_list(h, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y | |||||
decode_scaling_list(h, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y | |||||
decode_scaling_list(gb, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y | |||||
decode_scaling_list(gb, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y | |||||
if (sps->chroma_format_idc == 3) { | if (sps->chroma_format_idc == 3) { | ||||
decode_scaling_list(h, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr | |||||
decode_scaling_list(h, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr | |||||
decode_scaling_list(h, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb | |||||
decode_scaling_list(h, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb | |||||
decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr | |||||
decode_scaling_list(gb, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr | |||||
decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb | |||||
decode_scaling_list(gb, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb | |||||
} | } | ||||
} | } | ||||
} | } | ||||
} | } | ||||
int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, | |||||
H264ParamSets *ps, int ignore_truncation) | |||||
{ | { | ||||
AVBufferRef *sps_buf; | |||||
int profile_idc, level_idc, constraint_set_flags = 0; | int profile_idc, level_idc, constraint_set_flags = 0; | ||||
unsigned int sps_id; | unsigned int sps_id; | ||||
int i, log2_max_frame_num_minus4; | int i, log2_max_frame_num_minus4; | ||||
SPS *sps; | SPS *sps; | ||||
sps = av_mallocz(sizeof(SPS)); | |||||
if (!sps) | |||||
sps_buf = av_buffer_allocz(sizeof(*sps)); | |||||
if (!sps_buf) | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
sps = (SPS*)sps_buf->data; | |||||
sps->data_size = h->gb.buffer_end - h->gb.buffer; | |||||
sps->data_size = gb->buffer_end - gb->buffer; | |||||
if (sps->data_size > sizeof(sps->data)) { | if (sps->data_size > sizeof(sps->data)) { | ||||
av_log(h->avctx, AV_LOG_WARNING, "Truncating likely oversized SPS\n"); | |||||
av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized SPS\n"); | |||||
sps->data_size = sizeof(sps->data); | sps->data_size = sizeof(sps->data); | ||||
} | } | ||||
memcpy(sps->data, h->gb.buffer, sps->data_size); | |||||
profile_idc = get_bits(&h->gb, 8); | |||||
constraint_set_flags |= get_bits1(&h->gb) << 0; // constraint_set0_flag | |||||
constraint_set_flags |= get_bits1(&h->gb) << 1; // constraint_set1_flag | |||||
constraint_set_flags |= get_bits1(&h->gb) << 2; // constraint_set2_flag | |||||
constraint_set_flags |= get_bits1(&h->gb) << 3; // constraint_set3_flag | |||||
constraint_set_flags |= get_bits1(&h->gb) << 4; // constraint_set4_flag | |||||
constraint_set_flags |= get_bits1(&h->gb) << 5; // constraint_set5_flag | |||||
skip_bits(&h->gb, 2); // reserved_zero_2bits | |||||
level_idc = get_bits(&h->gb, 8); | |||||
sps_id = get_ue_golomb_31(&h->gb); | |||||
memcpy(sps->data, gb->buffer, sps->data_size); | |||||
profile_idc = get_bits(gb, 8); | |||||
constraint_set_flags |= get_bits1(gb) << 0; // constraint_set0_flag | |||||
constraint_set_flags |= get_bits1(gb) << 1; // constraint_set1_flag | |||||
constraint_set_flags |= get_bits1(gb) << 2; // constraint_set2_flag | |||||
constraint_set_flags |= get_bits1(gb) << 3; // constraint_set3_flag | |||||
constraint_set_flags |= get_bits1(gb) << 4; // constraint_set4_flag | |||||
constraint_set_flags |= get_bits1(gb) << 5; // constraint_set5_flag | |||||
skip_bits(gb, 2); // reserved_zero_2bits | |||||
level_idc = get_bits(gb, 8); | |||||
sps_id = get_ue_golomb_31(gb); | |||||
if (sps_id >= MAX_SPS_COUNT) { | if (sps_id >= MAX_SPS_COUNT) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id); | |||||
av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id); | |||||
goto fail; | goto fail; | ||||
} | } | ||||
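The start of ff_h264_decode_seq_parameter_set() above swaps the plain av_mallocz() for a refcounted av_buffer_allocz(), and still snapshots the raw (possibly truncated) SPS bytes into sps->data so a later, byte-identical SPS can be recognised. A stand-alone sketch of that allocate-and-snapshot step, with a cut-down MiniSPS instead of the real struct:

    #include <string.h>
    #include "libavutil/buffer.h"

    typedef struct MiniSPS { size_t data_size; unsigned char data[128]; } MiniSPS;

    static AVBufferRef *alloc_sps_snapshot(const unsigned char *rbsp, size_t len)
    {
        AVBufferRef *buf = av_buffer_allocz(sizeof(MiniSPS));
        MiniSPS *sps;
        if (!buf)
            return NULL;                                 /* ENOMEM upstream */
        sps = (MiniSPS *)buf->data;
        sps->data_size = len > sizeof(sps->data) ? sizeof(sps->data) : len;
        memcpy(sps->data, rbsp, sps->data_size);         /* keep raw bytes for dedup */
        return buf;
    }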
@@ -334,33 +358,33 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
sps->profile_idc == 128 || // Multiview High profile (MVC) | sps->profile_idc == 128 || // Multiview High profile (MVC) | ||||
sps->profile_idc == 138 || // Multiview Depth High profile (MVCD) | sps->profile_idc == 138 || // Multiview Depth High profile (MVCD) | ||||
sps->profile_idc == 144) { // old High444 profile | sps->profile_idc == 144) { // old High444 profile | ||||
sps->chroma_format_idc = get_ue_golomb_31(&h->gb); | |||||
sps->chroma_format_idc = get_ue_golomb_31(gb); | |||||
if (sps->chroma_format_idc > 3U) { | if (sps->chroma_format_idc > 3U) { | ||||
avpriv_request_sample(h->avctx, "chroma_format_idc %u", | |||||
avpriv_request_sample(avctx, "chroma_format_idc %u", | |||||
sps->chroma_format_idc); | sps->chroma_format_idc); | ||||
goto fail; | goto fail; | ||||
} else if (sps->chroma_format_idc == 3) { | } else if (sps->chroma_format_idc == 3) { | ||||
sps->residual_color_transform_flag = get_bits1(&h->gb); | |||||
sps->residual_color_transform_flag = get_bits1(gb); | |||||
if (sps->residual_color_transform_flag) { | if (sps->residual_color_transform_flag) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "separate color planes are not supported\n"); | |||||
av_log(avctx, AV_LOG_ERROR, "separate color planes are not supported\n"); | |||||
goto fail; | goto fail; | ||||
} | } | ||||
} | } | ||||
sps->bit_depth_luma = get_ue_golomb(&h->gb) + 8; | |||||
sps->bit_depth_chroma = get_ue_golomb(&h->gb) + 8; | |||||
sps->bit_depth_luma = get_ue_golomb(gb) + 8; | |||||
sps->bit_depth_chroma = get_ue_golomb(gb) + 8; | |||||
if (sps->bit_depth_chroma != sps->bit_depth_luma) { | if (sps->bit_depth_chroma != sps->bit_depth_luma) { | ||||
avpriv_request_sample(h->avctx, | |||||
avpriv_request_sample(avctx, | |||||
"Different chroma and luma bit depth"); | "Different chroma and luma bit depth"); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 || | if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 || | ||||
sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) { | sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n", | |||||
av_log(avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n", | |||||
sps->bit_depth_luma, sps->bit_depth_chroma); | sps->bit_depth_luma, sps->bit_depth_chroma); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
sps->transform_bypass = get_bits1(&h->gb); | |||||
decode_scaling_matrices(h, sps, NULL, 1, | |||||
sps->transform_bypass = get_bits1(gb); | |||||
decode_scaling_matrices(gb, sps, NULL, 1, | |||||
sps->scaling_matrix4, sps->scaling_matrix8); | sps->scaling_matrix4, sps->scaling_matrix8); | ||||
} else { | } else { | ||||
sps->chroma_format_idc = 1; | sps->chroma_format_idc = 1; | ||||
@@ -368,89 +392,89 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
sps->bit_depth_chroma = 8; | sps->bit_depth_chroma = 8; | ||||
} | } | ||||
log2_max_frame_num_minus4 = get_ue_golomb(&h->gb); | |||||
log2_max_frame_num_minus4 = get_ue_golomb(gb); | |||||
if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || | if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || | ||||
log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { | log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"log2_max_frame_num_minus4 out of range (0-12): %d\n", | "log2_max_frame_num_minus4 out of range (0-12): %d\n", | ||||
log2_max_frame_num_minus4); | log2_max_frame_num_minus4); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; | sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; | ||||
sps->poc_type = get_ue_golomb_31(&h->gb); | |||||
sps->poc_type = get_ue_golomb_31(gb); | |||||
if (sps->poc_type == 0) { // FIXME #define | if (sps->poc_type == 0) { // FIXME #define | ||||
unsigned t = get_ue_golomb(&h->gb); | |||||
unsigned t = get_ue_golomb(gb); | |||||
if (t>12) { | if (t>12) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "log2_max_poc_lsb (%d) is out of range\n", t); | |||||
av_log(avctx, AV_LOG_ERROR, "log2_max_poc_lsb (%d) is out of range\n", t); | |||||
goto fail; | goto fail; | ||||
} | } | ||||
sps->log2_max_poc_lsb = t + 4; | sps->log2_max_poc_lsb = t + 4; | ||||
} else if (sps->poc_type == 1) { // FIXME #define | } else if (sps->poc_type == 1) { // FIXME #define | ||||
sps->delta_pic_order_always_zero_flag = get_bits1(&h->gb); | |||||
sps->offset_for_non_ref_pic = get_se_golomb(&h->gb); | |||||
sps->offset_for_top_to_bottom_field = get_se_golomb(&h->gb); | |||||
sps->poc_cycle_length = get_ue_golomb(&h->gb); | |||||
sps->delta_pic_order_always_zero_flag = get_bits1(gb); | |||||
sps->offset_for_non_ref_pic = get_se_golomb(gb); | |||||
sps->offset_for_top_to_bottom_field = get_se_golomb(gb); | |||||
sps->poc_cycle_length = get_ue_golomb(gb); | |||||
if ((unsigned)sps->poc_cycle_length >= | if ((unsigned)sps->poc_cycle_length >= | ||||
FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) { | FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"poc_cycle_length overflow %d\n", sps->poc_cycle_length); | "poc_cycle_length overflow %d\n", sps->poc_cycle_length); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
for (i = 0; i < sps->poc_cycle_length; i++) | for (i = 0; i < sps->poc_cycle_length; i++) | ||||
sps->offset_for_ref_frame[i] = get_se_golomb(&h->gb); | |||||
sps->offset_for_ref_frame[i] = get_se_golomb(gb); | |||||
} else if (sps->poc_type != 2) { | } else if (sps->poc_type != 2) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); | |||||
av_log(avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); | |||||
goto fail; | goto fail; | ||||
} | } | ||||
sps->ref_frame_count = get_ue_golomb_31(&h->gb); | |||||
if (h->avctx->codec_tag == MKTAG('S', 'M', 'V', '2')) | |||||
sps->ref_frame_count = get_ue_golomb_31(gb); | |||||
if (avctx->codec_tag == MKTAG('S', 'M', 'V', '2')) | |||||
sps->ref_frame_count = FFMAX(2, sps->ref_frame_count); | sps->ref_frame_count = FFMAX(2, sps->ref_frame_count); | ||||
if (sps->ref_frame_count > H264_MAX_PICTURE_COUNT - 2 || | if (sps->ref_frame_count > H264_MAX_PICTURE_COUNT - 2 || | ||||
sps->ref_frame_count > 16U) { | sps->ref_frame_count > 16U) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"too many reference frames %d\n", sps->ref_frame_count); | "too many reference frames %d\n", sps->ref_frame_count); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
sps->gaps_in_frame_num_allowed_flag = get_bits1(&h->gb); | |||||
sps->mb_width = get_ue_golomb(&h->gb) + 1; | |||||
sps->mb_height = get_ue_golomb(&h->gb) + 1; | |||||
sps->gaps_in_frame_num_allowed_flag = get_bits1(gb); | |||||
sps->mb_width = get_ue_golomb(gb) + 1; | |||||
sps->mb_height = get_ue_golomb(gb) + 1; | |||||
if ((unsigned)sps->mb_width >= INT_MAX / 16 || | if ((unsigned)sps->mb_width >= INT_MAX / 16 || | ||||
(unsigned)sps->mb_height >= INT_MAX / 16 || | (unsigned)sps->mb_height >= INT_MAX / 16 || | ||||
av_image_check_size(16 * sps->mb_width, | av_image_check_size(16 * sps->mb_width, | ||||
16 * sps->mb_height, 0, h->avctx)) { | |||||
av_log(h->avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); | |||||
16 * sps->mb_height, 0, avctx)) { | |||||
av_log(avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); | |||||
goto fail; | goto fail; | ||||
} | } | ||||
sps->frame_mbs_only_flag = get_bits1(&h->gb); | |||||
sps->frame_mbs_only_flag = get_bits1(gb); | |||||
if (!sps->frame_mbs_only_flag) | if (!sps->frame_mbs_only_flag) | ||||
sps->mb_aff = get_bits1(&h->gb); | |||||
sps->mb_aff = get_bits1(gb); | |||||
else | else | ||||
sps->mb_aff = 0; | sps->mb_aff = 0; | ||||
sps->direct_8x8_inference_flag = get_bits1(&h->gb); | |||||
sps->direct_8x8_inference_flag = get_bits1(gb); | |||||
#ifndef ALLOW_INTERLACE | #ifndef ALLOW_INTERLACE | ||||
if (sps->mb_aff) | if (sps->mb_aff) | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"MBAFF support not included; enable it at compile-time.\n"); | "MBAFF support not included; enable it at compile-time.\n"); | ||||
#endif | #endif | ||||
sps->crop = get_bits1(&h->gb); | |||||
sps->crop = get_bits1(gb); | |||||
if (sps->crop) { | if (sps->crop) { | ||||
unsigned int crop_left = get_ue_golomb(&h->gb); | |||||
unsigned int crop_right = get_ue_golomb(&h->gb); | |||||
unsigned int crop_top = get_ue_golomb(&h->gb); | |||||
unsigned int crop_bottom = get_ue_golomb(&h->gb); | |||||
unsigned int crop_left = get_ue_golomb(gb); | |||||
unsigned int crop_right = get_ue_golomb(gb); | |||||
unsigned int crop_top = get_ue_golomb(gb); | |||||
unsigned int crop_bottom = get_ue_golomb(gb); | |||||
int width = 16 * sps->mb_width; | int width = 16 * sps->mb_width; | ||||
int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag); | int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag); | ||||
if (h->avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) { | |||||
av_log(h->avctx, AV_LOG_DEBUG, "discarding sps cropping, original " | |||||
if (avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) { | |||||
av_log(avctx, AV_LOG_DEBUG, "discarding sps cropping, original " | |||||
"values are l:%d r:%d t:%d b:%d\n", | "values are l:%d r:%d t:%d b:%d\n", | ||||
crop_left, crop_right, crop_top, crop_bottom); | crop_left, crop_right, crop_top, crop_bottom); | ||||
@@ -466,9 +490,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
int step_y = (2 - sps->frame_mbs_only_flag) << vsub; | int step_y = (2 - sps->frame_mbs_only_flag) << vsub; | ||||
if (crop_left & (0x1F >> (sps->bit_depth_luma > 8)) && | if (crop_left & (0x1F >> (sps->bit_depth_luma > 8)) && | ||||
!(h->avctx->flags & AV_CODEC_FLAG_UNALIGNED)) { | |||||
!(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) { | |||||
crop_left &= ~(0x1F >> (sps->bit_depth_luma > 8)); | crop_left &= ~(0x1F >> (sps->bit_depth_luma > 8)); | ||||
av_log(h->avctx, AV_LOG_WARNING, | |||||
av_log(avctx, AV_LOG_WARNING, | |||||
"Reducing left cropping to %d " | "Reducing left cropping to %d " | ||||
"chroma samples to preserve alignment.\n", | "chroma samples to preserve alignment.\n", | ||||
crop_left); | crop_left); | ||||
@@ -481,7 +505,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
(crop_left + crop_right ) * step_x >= width || | (crop_left + crop_right ) * step_x >= width || | ||||
(crop_top + crop_bottom) * step_y >= height | (crop_top + crop_bottom) * step_y >= height | ||||
) { | ) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "crop values invalid %d %d %d %d / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height); | |||||
av_log(avctx, AV_LOG_ERROR, "crop values invalid %d %d %d %d / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height); | |||||
goto fail; | goto fail; | ||||
} | } | ||||
@@ -498,16 +522,16 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
sps->crop = 0; | sps->crop = 0; | ||||
} | } | ||||
sps->vui_parameters_present_flag = get_bits1(&h->gb); | |||||
sps->vui_parameters_present_flag = get_bits1(gb); | |||||
if (sps->vui_parameters_present_flag) { | if (sps->vui_parameters_present_flag) { | ||||
int ret = decode_vui_parameters(h, sps); | |||||
int ret = decode_vui_parameters(gb, avctx, sps); | |||||
if (ret < 0) | if (ret < 0) | ||||
goto fail; | goto fail; | ||||
} | } | ||||
if (get_bits_left(&h->gb) < 0) { | |||||
av_log(h->avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR, | |||||
"Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(&h->gb)); | |||||
if (get_bits_left(gb) < 0) { | |||||
av_log(avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR, | |||||
"Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(gb)); | |||||
if (!ignore_truncation) | if (!ignore_truncation) | ||||
goto fail; | goto fail; | ||||
} | } | ||||
@@ -528,9 +552,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
if (!sps->sar.den) | if (!sps->sar.den) | ||||
sps->sar.den = 1; | sps->sar.den = 1; | ||||
if (h->avctx->debug & FF_DEBUG_PICT_INFO) { | |||||
if (avctx->debug & FF_DEBUG_PICT_INFO) { | |||||
static const char csp[4][5] = { "Gray", "420", "422", "444" }; | static const char csp[4][5] = { "Gray", "420", "422", "444" }; | ||||
av_log(h->avctx, AV_LOG_DEBUG, | |||||
av_log(avctx, AV_LOG_DEBUG, | |||||
"sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %"PRId32"/%"PRId32" b%d reo:%d\n", | "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %"PRId32"/%"PRId32" b%d reo:%d\n", | ||||
sps_id, sps->profile_idc, sps->level_idc, | sps_id, sps->profile_idc, sps->level_idc, | ||||
sps->poc_type, | sps->poc_type, | ||||
@@ -548,18 +572,97 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation) | |||||
sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1 | sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1 | ||||
); | ); | ||||
} | } | ||||
sps->new = 1; | |||||
av_free(h->sps_buffers[sps_id]); | |||||
h->sps_buffers[sps_id] = sps; | |||||
/* check if this is a repeat of an already parsed SPS, then keep the | |||||
* original one. | |||||
* otherwise drop all PPSes that depend on it */ | |||||
if (ps->sps_list[sps_id] && | |||||
!memcmp(ps->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) { | |||||
av_buffer_unref(&sps_buf); | |||||
} else { | |||||
remove_sps(ps, sps_id); | |||||
ps->sps_list[sps_id] = sps_buf; | |||||
} | |||||
return 0; | return 0; | ||||
fail: | fail: | ||||
av_free(sps); | |||||
av_buffer_unref(&sps_buf); | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
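The tail of the function above is where the refcounting pays off: if the freshly parsed SPS is byte-for-byte identical to the one already stored, the new buffer is simply dropped and the original (which other code may still reference) stays in place; only a genuinely different SPS replaces the slot. A hedged sketch of that store-or-keep decision (store_sps() is hypothetical and skips the dependent-PPS handling a full remove_sps() could do):

    #include <string.h>
    #include "libavutil/buffer.h"

    static void store_sps(AVBufferRef *sps_list[], int id, AVBufferRef **new_sps)
    {
        if (sps_list[id] && sps_list[id]->size == (*new_sps)->size &&
            !memcmp(sps_list[id]->data, (*new_sps)->data, (*new_sps)->size)) {
            av_buffer_unref(new_sps);          /* repeat: keep the original buffer */
        } else {
            av_buffer_unref(&sps_list[id]);    /* different: replace the slot */
            sps_list[id] = *new_sps;
            *new_sps     = NULL;               /* ownership moved into the list */
        }
    }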
static void init_dequant8_coeff_table(PPS *pps, const SPS *sps) | |||||
{ | |||||
int i, j, q, x; | |||||
const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8); | |||||
for (i = 0; i < 6; i++) { | |||||
pps->dequant8_coeff[i] = pps->dequant8_buffer[i]; | |||||
for (j = 0; j < i; j++) | |||||
if (!memcmp(pps->scaling_matrix8[j], pps->scaling_matrix8[i], | |||||
64 * sizeof(uint8_t))) { | |||||
pps->dequant8_coeff[i] = pps->dequant8_buffer[j]; | |||||
break; | |||||
} | |||||
if (j < i) | |||||
continue; | |||||
for (q = 0; q < max_qp + 1; q++) { | |||||
int shift = ff_h264_quant_div6[q]; | |||||
int idx = ff_h264_quant_rem6[q]; | |||||
for (x = 0; x < 64; x++) | |||||
pps->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] = | |||||
((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] * | |||||
pps->scaling_matrix8[i][x]) << shift; | |||||
} | |||||
} | |||||
} | |||||
static void init_dequant4_coeff_table(PPS *pps, const SPS *sps) | |||||
{ | |||||
int i, j, q, x; | |||||
const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8); | |||||
for (i = 0; i < 6; i++) { | |||||
pps->dequant4_coeff[i] = pps->dequant4_buffer[i]; | |||||
for (j = 0; j < i; j++) | |||||
if (!memcmp(pps->scaling_matrix4[j], pps->scaling_matrix4[i], | |||||
16 * sizeof(uint8_t))) { | |||||
pps->dequant4_coeff[i] = pps->dequant4_buffer[j]; | |||||
break; | |||||
} | |||||
if (j < i) | |||||
continue; | |||||
for (q = 0; q < max_qp + 1; q++) { | |||||
int shift = ff_h264_quant_div6[q] + 2; | |||||
int idx = ff_h264_quant_rem6[q]; | |||||
for (x = 0; x < 16; x++) | |||||
pps->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] = | |||||
((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * | |||||
pps->scaling_matrix4[i][x]) << shift; | |||||
} | |||||
} | |||||
} | |||||
static void init_dequant_tables(PPS *pps, const SPS *sps) | |||||
{ | |||||
int i, x; | |||||
init_dequant4_coeff_table(pps, sps); | |||||
memset(pps->dequant8_coeff, 0, sizeof(pps->dequant8_coeff)); | |||||
if (pps->transform_8x8_mode) | |||||
init_dequant8_coeff_table(pps, sps); | |||||
if (sps->transform_bypass) { | |||||
for (i = 0; i < 6; i++) | |||||
for (x = 0; x < 16; x++) | |||||
pps->dequant4_coeff[i][0][x] = 1 << 6; | |||||
if (pps->transform_8x8_mode) | |||||
for (i = 0; i < 6; i++) | |||||
for (x = 0; x < 64; x++) | |||||
pps->dequant8_coeff[i][0][x] = 1 << 6; | |||||
} | |||||
} | |||||
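The three functions above move the dequant tables from the decoder into the PPS itself, built once when the PPS is parsed. One detail worth calling out is the deduplication: if scaling matrix i is identical to an earlier matrix j, table i simply aliases table j instead of being recomputed. A reduced sketch of that sharing logic (stand-in types, 4x4 case only):

    #include <string.h>

    #define NTAB 6
    /* owner[i] records which table's buffer entry i should point at */
    static void share_equal_tables(const unsigned char mat[NTAB][16], int owner[NTAB])
    {
        for (int i = 0; i < NTAB; i++) {
            owner[i] = i;                       /* by default, own buffer */
            for (int j = 0; j < i; j++)
                if (!memcmp(mat[j], mat[i], 16)) {
                    owner[i] = j;               /* identical matrix: alias j */
                    break;
                }
        }
    }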
static void build_qp_table(PPS *pps, int t, int index, const int depth) | static void build_qp_table(PPS *pps, int t, int index, const int depth) | ||||
{ | { | ||||
int i; | int i; | ||||
@@ -569,14 +672,13 @@ static void build_qp_table(PPS *pps, int t, int index, const int depth) | |||||
ff_h264_chroma_qp[depth - 8][av_clip(i + index, 0, max_qp)]; | ff_h264_chroma_qp[depth - 8][av_clip(i + index, 0, max_qp)]; | ||||
} | } | ||||
static int more_rbsp_data_in_pps(H264Context *h, PPS *pps) | |||||
static int more_rbsp_data_in_pps(const SPS *sps, void *logctx) | |||||
{ | { | ||||
const SPS *sps = h->sps_buffers[pps->sps_id]; | |||||
int profile_idc = sps->profile_idc; | int profile_idc = sps->profile_idc; | ||||
if ((profile_idc == 66 || profile_idc == 77 || | if ((profile_idc == 66 || profile_idc == 77 || | ||||
profile_idc == 88) && (sps->constraint_set_flags & 7)) { | profile_idc == 88) && (sps->constraint_set_flags & 7)) { | ||||
av_log(h->avctx, AV_LOG_VERBOSE, | |||||
av_log(logctx, AV_LOG_VERBOSE, | |||||
"Current profile doesn't provide more RBSP data in PPS, skipping\n"); | "Current profile doesn't provide more RBSP data in PPS, skipping\n"); | ||||
return 0; | return 0; | ||||
} | } | ||||
@@ -584,57 +686,62 @@ static int more_rbsp_data_in_pps(H264Context *h, PPS *pps) | |||||
return 1; | return 1; | ||||
} | } | ||||
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) | |||||
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, | |||||
H264ParamSets *ps, int bit_length) | |||||
{ | { | ||||
const SPS *sps; | |||||
unsigned int pps_id = get_ue_golomb(&h->gb); | |||||
AVBufferRef *pps_buf; | |||||
SPS *sps; | |||||
unsigned int pps_id = get_ue_golomb(gb); | |||||
PPS *pps; | PPS *pps; | ||||
int qp_bd_offset; | int qp_bd_offset; | ||||
int bits_left; | int bits_left; | ||||
int ret; | int ret; | ||||
if (pps_id >= MAX_PPS_COUNT) { | if (pps_id >= MAX_PPS_COUNT) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); | |||||
av_log(avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
pps = av_mallocz(sizeof(PPS)); | |||||
if (!pps) | |||||
pps_buf = av_buffer_allocz(sizeof(*pps)); | |||||
if (!pps_buf) | |||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
pps->data_size = h->gb.buffer_end - h->gb.buffer; | |||||
pps = (PPS*)pps_buf->data; | |||||
pps->data_size = gb->buffer_end - gb->buffer; | |||||
if (pps->data_size > sizeof(pps->data)) { | if (pps->data_size > sizeof(pps->data)) { | ||||
av_log(h->avctx, AV_LOG_WARNING, "Truncating likely oversized PPS\n"); | |||||
av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized PPS\n"); | |||||
pps->data_size = sizeof(pps->data); | pps->data_size = sizeof(pps->data); | ||||
} | } | ||||
memcpy(pps->data, h->gb.buffer, pps->data_size); | |||||
pps->sps_id = get_ue_golomb_31(&h->gb); | |||||
memcpy(pps->data, gb->buffer, pps->data_size); | |||||
pps->sps_id = get_ue_golomb_31(gb); | |||||
if ((unsigned)pps->sps_id >= MAX_SPS_COUNT || | if ((unsigned)pps->sps_id >= MAX_SPS_COUNT || | ||||
!h->sps_buffers[pps->sps_id]) { | |||||
av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id); | |||||
!ps->sps_list[pps->sps_id]) { | |||||
av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id); | |||||
ret = AVERROR_INVALIDDATA; | ret = AVERROR_INVALIDDATA; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
sps = h->sps_buffers[pps->sps_id]; | |||||
sps = (SPS*)ps->sps_list[pps->sps_id]->data; | |||||
if (sps->bit_depth_luma > 14) { | if (sps->bit_depth_luma > 14) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"Invalid luma bit depth=%d\n", | "Invalid luma bit depth=%d\n", | ||||
sps->bit_depth_luma); | sps->bit_depth_luma); | ||||
ret = AVERROR_INVALIDDATA; | ret = AVERROR_INVALIDDATA; | ||||
goto fail; | goto fail; | ||||
} else if (sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) { | } else if (sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | |||||
av_log(avctx, AV_LOG_ERROR, | |||||
"Unimplemented luma bit depth=%d\n", | "Unimplemented luma bit depth=%d\n", | ||||
sps->bit_depth_luma); | sps->bit_depth_luma); | ||||
ret = AVERROR_PATCHWELCOME; | ret = AVERROR_PATCHWELCOME; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
pps->cabac = get_bits1(&h->gb); | |||||
pps->pic_order_present = get_bits1(&h->gb); | |||||
pps->slice_group_count = get_ue_golomb(&h->gb) + 1; | |||||
pps->cabac = get_bits1(gb); | |||||
pps->pic_order_present = get_bits1(gb); | |||||
pps->slice_group_count = get_ue_golomb(gb) + 1; | |||||
if (pps->slice_group_count > 1) { | if (pps->slice_group_count > 1) { | ||||
pps->mb_slice_group_map_type = get_ue_golomb(&h->gb); | |||||
av_log(h->avctx, AV_LOG_ERROR, "FMO not supported\n"); | |||||
pps->mb_slice_group_map_type = get_ue_golomb(gb); | |||||
av_log(avctx, AV_LOG_ERROR, "FMO not supported\n"); | |||||
switch (pps->mb_slice_group_map_type) { | switch (pps->mb_slice_group_map_type) { | ||||
case 0: | case 0: | ||||
#if 0 | #if 0 | ||||
@@ -667,40 +774,38 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) | |||||
break; | break; | ||||
} | } | ||||
} | } | ||||
pps->ref_count[0] = get_ue_golomb(&h->gb) + 1; | |||||
pps->ref_count[1] = get_ue_golomb(&h->gb) + 1; | |||||
pps->ref_count[0] = get_ue_golomb(gb) + 1; | |||||
pps->ref_count[1] = get_ue_golomb(gb) + 1; | |||||
if (pps->ref_count[0] - 1 > 32 - 1 || pps->ref_count[1] - 1 > 32 - 1) { | if (pps->ref_count[0] - 1 > 32 - 1 || pps->ref_count[1] - 1 > 32 - 1) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); | |||||
av_log(avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); | |||||
ret = AVERROR_INVALIDDATA; | ret = AVERROR_INVALIDDATA; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
qp_bd_offset = 6 * (sps->bit_depth_luma - 8); | qp_bd_offset = 6 * (sps->bit_depth_luma - 8); | ||||
pps->weighted_pred = get_bits1(&h->gb); | |||||
pps->weighted_bipred_idc = get_bits(&h->gb, 2); | |||||
pps->init_qp = get_se_golomb(&h->gb) + 26 + qp_bd_offset; | |||||
pps->init_qs = get_se_golomb(&h->gb) + 26 + qp_bd_offset; | |||||
pps->chroma_qp_index_offset[0] = get_se_golomb(&h->gb); | |||||
pps->deblocking_filter_parameters_present = get_bits1(&h->gb); | |||||
pps->constrained_intra_pred = get_bits1(&h->gb); | |||||
pps->redundant_pic_cnt_present = get_bits1(&h->gb); | |||||
pps->weighted_pred = get_bits1(gb); | |||||
pps->weighted_bipred_idc = get_bits(gb, 2); | |||||
pps->init_qp = get_se_golomb(gb) + 26 + qp_bd_offset; | |||||
pps->init_qs = get_se_golomb(gb) + 26 + qp_bd_offset; | |||||
pps->chroma_qp_index_offset[0] = get_se_golomb(gb); | |||||
pps->deblocking_filter_parameters_present = get_bits1(gb); | |||||
pps->constrained_intra_pred = get_bits1(gb); | |||||
pps->redundant_pic_cnt_present = get_bits1(gb); | |||||
pps->transform_8x8_mode = 0; | pps->transform_8x8_mode = 0; | ||||
// contents of sps/pps can change even if id doesn't, so reinit | |||||
h->dequant_coeff_pps = -1; | |||||
memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4, | |||||
memcpy(pps->scaling_matrix4, sps->scaling_matrix4, | |||||
sizeof(pps->scaling_matrix4)); | sizeof(pps->scaling_matrix4)); | ||||
memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8, | |||||
memcpy(pps->scaling_matrix8, sps->scaling_matrix8, | |||||
sizeof(pps->scaling_matrix8)); | sizeof(pps->scaling_matrix8)); | ||||
bits_left = bit_length - get_bits_count(&h->gb); | |||||
if (bits_left > 0 && more_rbsp_data_in_pps(h, pps)) { | |||||
pps->transform_8x8_mode = get_bits1(&h->gb); | |||||
decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0, | |||||
bits_left = bit_length - get_bits_count(gb); | |||||
if (bits_left > 0 && more_rbsp_data_in_pps(sps, avctx)) { | |||||
pps->transform_8x8_mode = get_bits1(gb); | |||||
decode_scaling_matrices(gb, sps, pps, 0, | |||||
pps->scaling_matrix4, pps->scaling_matrix8); | pps->scaling_matrix4, pps->scaling_matrix8); | ||||
// second_chroma_qp_index_offset | // second_chroma_qp_index_offset | ||||
pps->chroma_qp_index_offset[1] = get_se_golomb(&h->gb); | |||||
pps->chroma_qp_index_offset[1] = get_se_golomb(gb); | |||||
} else { | } else { | ||||
pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0]; | pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0]; | ||||
} | } | ||||
@@ -709,11 +814,14 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) | |||||
sps->bit_depth_luma); | sps->bit_depth_luma); | ||||
build_qp_table(pps, 1, pps->chroma_qp_index_offset[1], | build_qp_table(pps, 1, pps->chroma_qp_index_offset[1], | ||||
sps->bit_depth_luma); | sps->bit_depth_luma); | ||||
init_dequant_tables(pps, sps); | |||||
if (pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1]) | if (pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1]) | ||||
pps->chroma_qp_diff = 1; | pps->chroma_qp_diff = 1; | ||||
if (h->avctx->debug & FF_DEBUG_PICT_INFO) { | |||||
av_log(h->avctx, AV_LOG_DEBUG, | |||||
if (avctx->debug & FF_DEBUG_PICT_INFO) { | |||||
av_log(avctx, AV_LOG_DEBUG, | |||||
"pps:%u sps:%u %s slice_groups:%d ref:%u/%u %s qp:%d/%d/%d/%d %s %s %s %s\n", | "pps:%u sps:%u %s slice_groups:%d ref:%u/%u %s qp:%d/%d/%d/%d %s %s %s %s\n", | ||||
pps_id, pps->sps_id, | pps_id, pps->sps_id, | ||||
pps->cabac ? "CABAC" : "CAVLC", | pps->cabac ? "CABAC" : "CAVLC", | ||||
@@ -727,11 +835,12 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length) | |||||
pps->transform_8x8_mode ? "8x8DCT" : ""); | pps->transform_8x8_mode ? "8x8DCT" : ""); | ||||
} | } | ||||
av_free(h->pps_buffers[pps_id]); | |||||
h->pps_buffers[pps_id] = pps; | |||||
remove_pps(ps, pps_id); | |||||
ps->pps_list[pps_id] = pps_buf; | |||||
return 0; | return 0; | ||||
fail: | fail: | ||||
av_free(pps); | |||||
av_buffer_unref(&pps_buf); | |||||
return ret; | return ret; | ||||
} | } |
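The hunk above replaces the old av_free()/raw-pointer ownership of h->pps_buffers[pps_id] with a reference-counted buffer that is swapped into ps->pps_list[pps_id] after remove_pps(). Below is a minimal, self-contained sketch of that storage pattern using only the public AVBufferRef API; MyPPS, MY_MAX_PPS_COUNT and store_pps() are illustrative stand-ins, not the decoder's real types.

#include <errno.h>
#include <string.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>

#define MY_MAX_PPS_COUNT 256

/* Illustrative stand-in for the decoder's PPS struct. */
typedef struct MyPPS {
    unsigned sps_id;
    int      ref_count[2];
} MyPPS;

/* Store a new, reference-counted copy of *src in slot pps_id.  Anyone
 * still holding a reference to the previous buffer (e.g. an active
 * pps_ref) keeps it alive until they unref it themselves. */
static int store_pps(AVBufferRef *pps_list[MY_MAX_PPS_COUNT],
                     unsigned pps_id, const MyPPS *src)
{
    AVBufferRef *buf = av_buffer_allocz(sizeof(*src));
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf->data, src, sizeof(*src));

    av_buffer_unref(&pps_list[pps_id]);  /* drop the list's old reference */
    pps_list[pps_id] = buf;              /* the list owns the new buffer */
    return 0;
}

Because each slot holds an AVBufferRef rather than a malloc'ed struct, the currently active parameter set can outlive its removal from the list, which is what the pps_ref/sps_ref handling further down in this diff relies on.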
@@ -592,7 +592,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice) | |||||
int mmco_index = 0, i = 0; | int mmco_index = 0, i = 0; | ||||
if (h->short_ref_count && | if (h->short_ref_count && | ||||
h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count && | |||||
h->long_ref_count + h->short_ref_count >= h->ps.sps->ref_frame_count && | |||||
!(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) { | !(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) { | ||||
mmco[0].opcode = MMCO_SHORT2UNUSED; | mmco[0].opcode = MMCO_SHORT2UNUSED; | ||||
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num; | mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num; | ||||
@@ -768,7 +768,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count) | |||||
} | } | ||||
} | } | ||||
if (h->long_ref_count + h->short_ref_count > FFMAX(h->sps.ref_frame_count, 1)) { | |||||
if (h->long_ref_count + h->short_ref_count > FFMAX(h->ps.sps->ref_frame_count, 1)) { | |||||
/* We have too many reference frames, probably due to corrupted | /* We have too many reference frames, probably due to corrupted | ||||
* stream. Need to discard one frame. Prevents overrun of the | * stream. Need to discard one frame. Prevents overrun of the | ||||
@@ -777,7 +777,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count) | |||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"number of reference frames (%d+%d) exceeds max (%d; probably " | "number of reference frames (%d+%d) exceeds max (%d; probably " | ||||
"corrupt input), discarding one\n", | "corrupt input), discarding one\n", | ||||
h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count); | |||||
h->long_ref_count, h->short_ref_count, h->ps.sps->ref_frame_count); | |||||
err = AVERROR_INVALIDDATA; | err = AVERROR_INVALIDDATA; | ||||
if (h->long_ref_count && !h->short_ref_count) { | if (h->long_ref_count && !h->short_ref_count) { | ||||
@@ -796,8 +796,8 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count) | |||||
for (i = 0; i<h->short_ref_count; i++) { | for (i = 0; i<h->short_ref_count; i++) { | ||||
pic = h->short_ref[i]; | pic = h->short_ref[i]; | ||||
if (pic->invalid_gap) { | if (pic->invalid_gap) { | ||||
int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->sps.log2_max_frame_num); | |||||
if (d > h->sps.ref_frame_count) | |||||
int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->ps.sps->log2_max_frame_num); | |||||
if (d > h->ps.sps->ref_frame_count) | |||||
remove_short(h, pic->frame_num, 0); | remove_short(h, pic->frame_num, 0); | ||||
} | } | ||||
} | } | ||||
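The invalid-gap cleanup above measures how far a short-term reference lies behind the current frame with av_mod_uintp2(), i.e. modulo MaxFrameNum. A small worked sketch of that arithmetic (the frame numbers are chosen purely for illustration):

#include <stdio.h>
#include <libavutil/common.h>   /* av_mod_uintp2() */

int main(void)
{
    /* Assume log2_max_frame_num = 4, so frame numbers wrap at 16. */
    unsigned cur_frame_num = 2, old_frame_num = 14, log2_max_frame_num = 4;

    /* 2 - 14 underflows as unsigned, but masking to 4 bits yields the
     * wrapped forward distance: (2 - 14) & 0xF = 4. */
    unsigned d = av_mod_uintp2(cur_frame_num - old_frame_num, log2_max_frame_num);
    printf("wrapped distance: %u\n", d);   /* prints 4 */
    return 0;
}

If that distance exceeds ref_frame_count, the picture can no longer be referenced by the stream, so the loop above drops it from the short-term list.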
@@ -805,10 +805,11 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count) | |||||
print_short_term(h); | print_short_term(h); | ||||
print_long_term(h); | print_long_term(h); | ||||
for (i = 0; i < FF_ARRAY_ELEMS(h->pps_buffers); i++) { | |||||
if (h->pps_buffers[i]) { | |||||
pps_ref_count[0] = FFMAX(pps_ref_count[0], h->pps_buffers[i]->ref_count[0]); | |||||
pps_ref_count[1] = FFMAX(pps_ref_count[1], h->pps_buffers[i]->ref_count[1]); | |||||
for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) { | |||||
if (h->ps.pps_list[i]) { | |||||
const PPS *pps = (const PPS *)h->ps.pps_list[i]->data; | |||||
pps_ref_count[0] = FFMAX(pps_ref_count[0], pps->ref_count[0]); | |||||
pps_ref_count[1] = FFMAX(pps_ref_count[1], pps->ref_count[1]); | |||||
} | } | ||||
} | } | ||||
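With parameter sets now stored as AVBufferRef entries, reading one back means dereferencing buf->data, as the loop above does when it scans h->ps.pps_list for the largest advertised reference counts. A short sketch of that access pattern, reusing the illustrative MyPPS layout from the earlier store_pps() sketch:

#include <libavutil/buffer.h>
#include <libavutil/common.h>   /* FFMAX */

/* Same illustrative struct as in the store_pps() sketch above. */
typedef struct MyPPS {
    unsigned sps_id;
    int      ref_count[2];
} MyPPS;

/* Largest ref_count[] advertised by any stored PPS; unused ids are NULL. */
static void max_pps_ref_counts(AVBufferRef *const pps_list[], int nb_pps,
                               int out[2])
{
    out[0] = out[1] = 0;
    for (int i = 0; i < nb_pps; i++) {
        if (!pps_list[i])
            continue;
        const MyPPS *pps = (const MyPPS *)pps_list[i]->data;
        out[0] = FFMAX(out[0], pps->ref_count[0]);
        out[1] = FFMAX(out[1], pps->ref_count[1]);
    }
}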
@@ -50,12 +50,17 @@ void ff_h264_reset_sei(H264Context *h) | |||||
static int decode_picture_timing(H264Context *h) | static int decode_picture_timing(H264Context *h) | ||||
{ | { | ||||
SPS *sps = &h->sps; | |||||
const SPS *sps = h->ps.sps; | |||||
int i; | int i; | ||||
for (i = 0; i<MAX_SPS_COUNT; i++) | for (i = 0; i<MAX_SPS_COUNT; i++) | ||||
if (!sps->log2_max_frame_num && h->sps_buffers[i]) | |||||
sps = h->sps_buffers[i]; | |||||
if ((!sps || !sps->log2_max_frame_num) && h->ps.sps_list[i]) | |||||
sps = (const SPS *)h->ps.sps_list[i]->data; | |||||
if (!sps) { | |||||
av_log(h->avctx, AV_LOG_ERROR, "SPS unavailable in decode_picture_timing\n"); | |||||
return 0; | |||||
} | |||||
if (sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) { | if (sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) { | ||||
h->sei_cpb_removal_delay = get_bits_long(&h->gb, | h->sei_cpb_removal_delay = get_bits_long(&h->gb, | ||||
@@ -275,12 +280,12 @@ static int decode_buffering_period(H264Context *h) | |||||
SPS *sps; | SPS *sps; | ||||
sps_id = get_ue_golomb_31(&h->gb); | sps_id = get_ue_golomb_31(&h->gb); | ||||
if (sps_id > 31 || !h->sps_buffers[sps_id]) { | |||||
if (sps_id > 31 || !h->ps.sps_list[sps_id]) { | |||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"non-existing SPS %d referenced in buffering period\n", sps_id); | "non-existing SPS %d referenced in buffering period\n", sps_id); | ||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
sps = h->sps_buffers[sps_id]; | |||||
sps = (SPS*)h->ps.sps_list[sps_id]->data; | |||||
// NOTE: This is really so duplicated in the standard... See H.264, D.1.1 | // NOTE: This is really so duplicated in the standard... See H.264, D.1.1 | ||||
if (sps->nal_hrd_parameters_present_flag) { | if (sps->nal_hrd_parameters_present_flag) { | ||||
@@ -192,9 +192,9 @@ static int alloc_picture(H264Context *h, H264Picture *pic) | |||||
if (ret < 0) | if (ret < 0) | ||||
goto fail; | goto fail; | ||||
pic->crop = h->sps.crop; | |||||
pic->crop_top = h->sps.crop_top; | |||||
pic->crop_left= h->sps.crop_left; | |||||
pic->crop = h->ps.sps->crop; | |||||
pic->crop_top = h->ps.sps->crop_top; | |||||
pic->crop_left= h->ps.sps->crop_left; | |||||
if (h->avctx->hwaccel) { | if (h->avctx->hwaccel) { | ||||
const AVHWAccel *hwaccel = h->avctx->hwaccel; | const AVHWAccel *hwaccel = h->avctx->hwaccel; | ||||
@@ -271,78 +271,6 @@ static int find_unused_picture(H264Context *h) | |||||
} | } | ||||
static void init_dequant8_coeff_table(H264Context *h) | |||||
{ | |||||
int i, j, q, x; | |||||
const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8); | |||||
for (i = 0; i < 6; i++) { | |||||
h->dequant8_coeff[i] = h->dequant8_buffer[i]; | |||||
for (j = 0; j < i; j++) | |||||
if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i], | |||||
64 * sizeof(uint8_t))) { | |||||
h->dequant8_coeff[i] = h->dequant8_buffer[j]; | |||||
break; | |||||
} | |||||
if (j < i) | |||||
continue; | |||||
for (q = 0; q < max_qp + 1; q++) { | |||||
int shift = ff_h264_quant_div6[q]; | |||||
int idx = ff_h264_quant_rem6[q]; | |||||
for (x = 0; x < 64; x++) | |||||
h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] = | |||||
((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] * | |||||
h->pps.scaling_matrix8[i][x]) << shift; | |||||
} | |||||
} | |||||
} | |||||
static void init_dequant4_coeff_table(H264Context *h) | |||||
{ | |||||
int i, j, q, x; | |||||
const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8); | |||||
for (i = 0; i < 6; i++) { | |||||
h->dequant4_coeff[i] = h->dequant4_buffer[i]; | |||||
for (j = 0; j < i; j++) | |||||
if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i], | |||||
16 * sizeof(uint8_t))) { | |||||
h->dequant4_coeff[i] = h->dequant4_buffer[j]; | |||||
break; | |||||
} | |||||
if (j < i) | |||||
continue; | |||||
for (q = 0; q < max_qp + 1; q++) { | |||||
int shift = ff_h264_quant_div6[q] + 2; | |||||
int idx = ff_h264_quant_rem6[q]; | |||||
for (x = 0; x < 16; x++) | |||||
h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] = | |||||
((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * | |||||
h->pps.scaling_matrix4[i][x]) << shift; | |||||
} | |||||
} | |||||
} | |||||
void ff_h264_init_dequant_tables(H264Context *h) | |||||
{ | |||||
int i, x; | |||||
init_dequant4_coeff_table(h); | |||||
memset(h->dequant8_coeff, 0, sizeof(h->dequant8_coeff)); | |||||
if (h->pps.transform_8x8_mode) | |||||
init_dequant8_coeff_table(h); | |||||
if (h->sps.transform_bypass) { | |||||
for (i = 0; i < 6; i++) | |||||
for (x = 0; x < 16; x++) | |||||
h->dequant4_coeff[i][0][x] = 1 << 6; | |||||
if (h->pps.transform_8x8_mode) | |||||
for (i = 0; i < 6; i++) | |||||
for (x = 0; x < 64; x++) | |||||
h->dequant8_coeff[i][0][x] = 1 << 6; | |||||
} | |||||
} | |||||
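The dequant-table builders deleted here index their multiplier tables through ff_h264_quant_div6[]/ff_h264_quant_rem6[], which is effectively a qp = 6*shift + idx decomposition; per the earlier hunk in this diff, the same work now happens in ff_h264_decode_picture_parameter_set() via init_dequant_tables(pps, sps). A tiny standalone illustration of that split, without FFmpeg's lookup tables:

#include <stdio.h>

/* qp splits into a shift (qp / 6) and a table index (qp % 6); the removed
 * code reads an equivalent split from precomputed div6/rem6 tables instead
 * of dividing at runtime, and scales the dequant coefficient by 2^shift. */
int main(void)
{
    for (int qp = 0; qp <= 51; qp += 10) {
        int shift = qp / 6;
        int idx   = qp % 6;
        printf("qp=%2d -> shift=%d idx=%d\n", qp, shift, idx);
    }
    return 0;
}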
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size)))) | #define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size)))) | ||||
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \ | #define REBASE_PICTURE(pic, new_ctx, old_ctx) \ | ||||
@@ -364,26 +292,6 @@ static void copy_picture_range(H264Picture **to, H264Picture **from, int count, | |||||
} | } | ||||
} | } | ||||
static int copy_parameter_set(void **to, void **from, int count, int size) | |||||
{ | |||||
int i; | |||||
for (i = 0; i < count; i++) { | |||||
if (to[i] && !from[i]) { | |||||
av_freep(&to[i]); | |||||
} else if (from[i] && !to[i]) { | |||||
to[i] = av_malloc(size); | |||||
if (!to[i]) | |||||
return AVERROR(ENOMEM); | |||||
} | |||||
if (from[i]) | |||||
memcpy(to[i], from[i], size); | |||||
} | |||||
return 0; | |||||
} | |||||
#define copy_fields(to, from, start_field, end_field) \ | #define copy_fields(to, from, start_field, end_field) \ | ||||
memcpy(&(to)->start_field, &(from)->start_field, \ | memcpy(&(to)->start_field, &(from)->start_field, \ | ||||
(char *)&(to)->end_field - (char *)&(to)->start_field) | (char *)&(to)->end_field - (char *)&(to)->start_field) | ||||
@@ -401,15 +309,19 @@ int ff_h264_update_thread_context(AVCodecContext *dst, | |||||
if (dst == src) | if (dst == src) | ||||
return 0; | return 0; | ||||
// We can't fail if SPS isn't set as it breaks current skip_frame code | |||||
//if (!h1->ps.sps) | |||||
// return AVERROR_INVALIDDATA; | |||||
if (inited && | if (inited && | ||||
(h->width != h1->width || | (h->width != h1->width || | ||||
h->height != h1->height || | h->height != h1->height || | ||||
h->mb_width != h1->mb_width || | h->mb_width != h1->mb_width || | ||||
h->mb_height != h1->mb_height || | h->mb_height != h1->mb_height || | ||||
h->sps.bit_depth_luma != h1->sps.bit_depth_luma || | |||||
h->sps.chroma_format_idc != h1->sps.chroma_format_idc || | |||||
h->sps.colorspace != h1->sps.colorspace)) { | |||||
!h->ps.sps || | |||||
h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma || | |||||
h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc || | |||||
h->ps.sps->colorspace != h1->ps.sps->colorspace)) { | |||||
need_reinit = 1; | need_reinit = 1; | ||||
} | } | ||||
@@ -417,16 +329,39 @@ int ff_h264_update_thread_context(AVCodecContext *dst, | |||||
memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset)); | memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset)); | ||||
// SPS/PPS | // SPS/PPS | ||||
if ((ret = copy_parameter_set((void **)h->sps_buffers, | |||||
(void **)h1->sps_buffers, | |||||
MAX_SPS_COUNT, sizeof(SPS))) < 0) | |||||
return ret; | |||||
h->sps = h1->sps; | |||||
if ((ret = copy_parameter_set((void **)h->pps_buffers, | |||||
(void **)h1->pps_buffers, | |||||
MAX_PPS_COUNT, sizeof(PPS))) < 0) | |||||
return ret; | |||||
h->pps = h1->pps; | |||||
for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) { | |||||
av_buffer_unref(&h->ps.sps_list[i]); | |||||
if (h1->ps.sps_list[i]) { | |||||
h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]); | |||||
if (!h->ps.sps_list[i]) | |||||
return AVERROR(ENOMEM); | |||||
} | |||||
} | |||||
for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) { | |||||
av_buffer_unref(&h->ps.pps_list[i]); | |||||
if (h1->ps.pps_list[i]) { | |||||
h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]); | |||||
if (!h->ps.pps_list[i]) | |||||
return AVERROR(ENOMEM); | |||||
} | |||||
} | |||||
av_buffer_unref(&h->ps.pps_ref); | |||||
av_buffer_unref(&h->ps.sps_ref); | |||||
h->ps.pps = NULL; | |||||
h->ps.sps = NULL; | |||||
if (h1->ps.pps_ref) { | |||||
h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref); | |||||
if (!h->ps.pps_ref) | |||||
return AVERROR(ENOMEM); | |||||
h->ps.pps = h->ps.pps_ref->data; | |||||
} | |||||
if (h1->ps.sps_ref) { | |||||
h->ps.sps_ref = av_buffer_ref(h1->ps.sps_ref); | |||||
if (!h->ps.sps_ref) | |||||
return AVERROR(ENOMEM); | |||||
h->ps.sps = h->ps.sps_ref->data; | |||||
} | |||||
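The block above re-references every SPS/PPS buffer plus the active sps_ref/pps_ref instead of memcpy()ing whole structs the way the removed copy_parameter_set() did. A reduced sketch of that copy step for a single list, again assuming hypothetical MyPPS-style buffers and a destination that may already hold references:

#include <errno.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>

/* Re-reference src[0..nb-1] into dst[0..nb-1]; entries may be NULL.
 * On ENOMEM the destination is left partially updated, mirroring the
 * early return in the hunk above. */
static int ref_param_set_list(AVBufferRef *dst[], AVBufferRef *const src[],
                              int nb)
{
    for (int i = 0; i < nb; i++) {
        av_buffer_unref(&dst[i]);          /* drop whatever dst held */
        if (!src[i])
            continue;
        dst[i] = av_buffer_ref(src[i]);    /* share, don't copy, the data */
        if (!dst[i])
            return AVERROR(ENOMEM);
    }
    return 0;
}

Because only references are duplicated, the per-thread contexts end up pointing at the same immutable parameter-set data rather than at diverging copies.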
if (need_reinit || !inited) { | if (need_reinit || !inited) { | ||||
h->width = h1->width; | h->width = h1->width; | ||||
@@ -485,20 +420,6 @@ int ff_h264_update_thread_context(AVCodecContext *dst, | |||||
h->nal_length_size = h1->nal_length_size; | h->nal_length_size = h1->nal_length_size; | ||||
h->x264_build = h1->x264_build; | h->x264_build = h1->x264_build; | ||||
// Dequantization matrices | |||||
// FIXME these are big - can they be only copied when PPS changes? | |||||
copy_fields(h, h1, dequant4_buffer, dequant4_coeff); | |||||
for (i = 0; i < 6; i++) | |||||
h->dequant4_coeff[i] = h->dequant4_buffer[0] + | |||||
(h1->dequant4_coeff[i] - h1->dequant4_buffer[0]); | |||||
for (i = 0; i < 6; i++) | |||||
h->dequant8_coeff[i] = h->dequant8_buffer[0] + | |||||
(h1->dequant8_coeff[i] - h1->dequant8_buffer[0]); | |||||
h->dequant_coeff_pps = h1->dequant_coeff_pps; | |||||
// POC timing | // POC timing | ||||
copy_fields(h, h1, poc_lsb, current_slice); | copy_fields(h, h1, poc_lsb, current_slice); | ||||
@@ -531,9 +452,9 @@ static int h264_frame_start(H264Context *h) | |||||
int i, ret; | int i, ret; | ||||
const int pixel_shift = h->pixel_shift; | const int pixel_shift = h->pixel_shift; | ||||
int c[4] = { | int c[4] = { | ||||
1<<(h->sps.bit_depth_luma-1), | |||||
1<<(h->sps.bit_depth_chroma-1), | |||||
1<<(h->sps.bit_depth_chroma-1), | |||||
1<<(h->ps.sps->bit_depth_luma-1), | |||||
1<<(h->ps.sps->bit_depth_chroma-1), | |||||
1<<(h->ps.sps->bit_depth_chroma-1), | |||||
-1 | -1 | ||||
}; | }; | ||||
@@ -806,7 +727,7 @@ static void init_scan_tables(H264Context *h) | |||||
h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]); | h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]); | ||||
#undef TRANSPOSE | #undef TRANSPOSE | ||||
} | } | ||||
if (h->sps.transform_bypass) { // FIXME same ugly | |||||
if (h->ps.sps->transform_bypass) { // FIXME same ugly | |||||
memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 )); | memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 )); | ||||
memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 )); | memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 )); | ||||
memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0)); | memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0)); | ||||
@@ -835,7 +756,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) | |||||
const enum AVPixelFormat *choices = pix_fmts; | const enum AVPixelFormat *choices = pix_fmts; | ||||
int i; | int i; | ||||
switch (h->sps.bit_depth_luma) { | |||||
switch (h->ps.sps->bit_depth_luma) { | |||||
case 9: | case 9: | ||||
if (CHROMA444(h)) { | if (CHROMA444(h)) { | ||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) { | if (h->avctx->colorspace == AVCOL_SPC_RGB) { | ||||
@@ -923,7 +844,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) | |||||
break; | break; | ||||
default: | default: | ||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"Unsupported bit depth %d\n", h->sps.bit_depth_luma); | |||||
"Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma); | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
@@ -938,10 +859,11 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) | |||||
/* export coded and cropped frame dimensions to AVCodecContext */ | /* export coded and cropped frame dimensions to AVCodecContext */ | ||||
static int init_dimensions(H264Context *h) | static int init_dimensions(H264Context *h) | ||||
{ | { | ||||
int width = h->width - (h->sps.crop_right + h->sps.crop_left); | |||||
int height = h->height - (h->sps.crop_top + h->sps.crop_bottom); | |||||
av_assert0(h->sps.crop_right + h->sps.crop_left < (unsigned)h->width); | |||||
av_assert0(h->sps.crop_top + h->sps.crop_bottom < (unsigned)h->height); | |||||
SPS *sps = h->ps.sps; | |||||
int width = h->width - (sps->crop_right + sps->crop_left); | |||||
int height = h->height - (sps->crop_top + sps->crop_bottom); | |||||
av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width); | |||||
av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height); | |||||
/* handle container cropping */ | /* handle container cropping */ | ||||
if (FFALIGN(h->avctx->width, 16) == FFALIGN(width, 16) && | if (FFALIGN(h->avctx->width, 16) == FFALIGN(width, 16) && | ||||
@@ -960,11 +882,11 @@ static int init_dimensions(H264Context *h) | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n"); | av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n"); | ||||
h->sps.crop_bottom = | |||||
h->sps.crop_top = | |||||
h->sps.crop_right = | |||||
h->sps.crop_left = | |||||
h->sps.crop = 0; | |||||
sps->crop_bottom = | |||||
sps->crop_top = | |||||
sps->crop_right = | |||||
sps->crop_left = | |||||
sps->crop = 0; | |||||
width = h->width; | width = h->width; | ||||
height = h->height; | height = h->height; | ||||
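init_dimensions() above keeps the container-provided dimensions only when they round up to the same macroblock-aligned size as the SPS-derived ones (the full condition in the hunk has more clauses than shown here). A small numeric sketch of that FFALIGN() comparison, with 1080p values chosen purely as an example:

#include <stdio.h>
#include <libavutil/macros.h>   /* FFALIGN */

int main(void)
{
    /* Typical 1080p case: the SPS codes 1920x1088 and crops 8 bottom rows. */
    int coded_w = 1920, coded_h = 1088;
    int crop_bottom = 8;
    int sps_w = coded_w;                    /* no horizontal cropping */
    int sps_h = coded_h - crop_bottom;      /* 1080 */

    /* Container says 1920x1080: same 16-aligned size, so it can be trusted. */
    int container_w = 1920, container_h = 1080;
    int ok_w = FFALIGN(container_w, 16) == FFALIGN(sps_w, 16);
    int ok_h = FFALIGN(container_h, 16) == FFALIGN(sps_h, 16);
    printf("accept container dimensions: %s\n",
           (ok_w && ok_h) ? "yes" : "no");   /* prints "yes" */
    return 0;
}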
@@ -980,21 +902,22 @@ static int init_dimensions(H264Context *h) | |||||
static int h264_slice_header_init(H264Context *h) | static int h264_slice_header_init(H264Context *h) | ||||
{ | { | ||||
const SPS *sps = h->ps.sps; | |||||
int nb_slices = (HAVE_THREADS && | int nb_slices = (HAVE_THREADS && | ||||
h->avctx->active_thread_type & FF_THREAD_SLICE) ? | h->avctx->active_thread_type & FF_THREAD_SLICE) ? | ||||
h->avctx->thread_count : 1; | h->avctx->thread_count : 1; | ||||
int i, ret; | int i, ret; | ||||
ff_set_sar(h->avctx, h->sps.sar); | |||||
ff_set_sar(h->avctx, sps->sar); | |||||
av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt, | av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt, | ||||
&h->chroma_x_shift, &h->chroma_y_shift); | &h->chroma_x_shift, &h->chroma_y_shift); | ||||
if (h->sps.timing_info_present_flag) { | |||||
int64_t den = h->sps.time_scale; | |||||
if (sps->timing_info_present_flag) { | |||||
int64_t den = sps->time_scale; | |||||
if (h->x264_build < 44U) | if (h->x264_build < 44U) | ||||
den *= 2; | den *= 2; | ||||
av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num, | av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num, | ||||
h->sps.num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30); | |||||
sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30); | |||||
} | } | ||||
ff_h264_free_tables(h); | ff_h264_free_tables(h); | ||||
@@ -1012,7 +935,7 @@ static int h264_slice_header_init(H264Context *h) | |||||
#if FF_API_CAP_VDPAU | #if FF_API_CAP_VDPAU | ||||
if (h->avctx->codec && | if (h->avctx->codec && | ||||
h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU && | h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU && | ||||
(h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) { | |||||
(sps->bit_depth_luma != 8 || sps->chroma_format_idc > 1)) { | |||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"VDPAU decoding does not support video colorspace.\n"); | "VDPAU decoding does not support video colorspace.\n"); | ||||
ret = AVERROR_INVALIDDATA; | ret = AVERROR_INVALIDDATA; | ||||
@@ -1020,29 +943,29 @@ static int h264_slice_header_init(H264Context *h) | |||||
} | } | ||||
#endif | #endif | ||||
if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 14 || | |||||
h->sps.bit_depth_luma == 11 || h->sps.bit_depth_luma == 13 | |||||
if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 || | |||||
sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13 | |||||
) { | ) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n", | av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n", | ||||
h->sps.bit_depth_luma); | |||||
sps->bit_depth_luma); | |||||
ret = AVERROR_INVALIDDATA; | ret = AVERROR_INVALIDDATA; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
h->cur_bit_depth_luma = | h->cur_bit_depth_luma = | ||||
h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; | |||||
h->cur_chroma_format_idc = h->sps.chroma_format_idc; | |||||
h->pixel_shift = h->sps.bit_depth_luma > 8; | |||||
h->chroma_format_idc = h->sps.chroma_format_idc; | |||||
h->bit_depth_luma = h->sps.bit_depth_luma; | |||||
ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, | |||||
h->sps.chroma_format_idc); | |||||
ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma); | |||||
ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma); | |||||
ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma, | |||||
h->sps.chroma_format_idc); | |||||
ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma); | |||||
h->avctx->bits_per_raw_sample = sps->bit_depth_luma; | |||||
h->cur_chroma_format_idc = sps->chroma_format_idc; | |||||
h->pixel_shift = sps->bit_depth_luma > 8; | |||||
h->chroma_format_idc = sps->chroma_format_idc; | |||||
h->bit_depth_luma = sps->bit_depth_luma; | |||||
ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma, | |||||
sps->chroma_format_idc); | |||||
ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma); | |||||
ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma); | |||||
ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma, | |||||
sps->chroma_format_idc); | |||||
ff_videodsp_init(&h->vdsp, sps->bit_depth_luma); | |||||
if (nb_slices > H264_MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) { | if (nb_slices > H264_MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) { | ||||
int max_slices; | int max_slices; | ||||
@@ -1109,6 +1032,8 @@ static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a) | |||||
*/ | */ | ||||
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | ||||
{ | { | ||||
const SPS *sps; | |||||
const PPS *pps; | |||||
unsigned int first_mb_in_slice; | unsigned int first_mb_in_slice; | ||||
unsigned int pps_id; | unsigned int pps_id; | ||||
int ret; | int ret; | ||||
@@ -1120,7 +1045,6 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
int first_slice = sl == h->slice_ctx && !h->current_slice; | int first_slice = sl == h->slice_ctx && !h->current_slice; | ||||
int frame_num, droppable, picture_structure; | int frame_num, droppable, picture_structure; | ||||
int mb_aff_frame, last_mb_aff_frame; | int mb_aff_frame, last_mb_aff_frame; | ||||
PPS *pps; | |||||
if (first_slice) | if (first_slice) | ||||
av_assert0(!h->setup_finished); | av_assert0(!h->setup_finished); | ||||
@@ -1218,7 +1142,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); | av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); | ||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
if (!h->pps_buffers[pps_id]) { | |||||
if (!h->ps.pps_list[pps_id]) { | |||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"non-existing PPS %u referenced\n", | "non-existing PPS %u referenced\n", | ||||
pps_id); | pps_id); | ||||
@@ -1231,29 +1155,33 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
pps = h->pps_buffers[pps_id]; | |||||
pps = (const PPS*)h->ps.pps_list[pps_id]->data; | |||||
if (!h->sps_buffers[pps->sps_id]) { | |||||
if (!h->ps.sps_list[pps->sps_id]) { | |||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
"non-existing SPS %u referenced\n", | "non-existing SPS %u referenced\n", | ||||
h->pps.sps_id); | |||||
pps->sps_id); | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
if (first_slice) { | if (first_slice) { | ||||
h->pps = *h->pps_buffers[pps_id]; | |||||
av_buffer_unref(&h->ps.pps_ref); | |||||
h->ps.pps = NULL; | |||||
h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[pps_id]); | |||||
if (!h->ps.pps_ref) | |||||
return AVERROR(ENOMEM); | |||||
h->ps.pps = (const PPS*)h->ps.pps_ref->data; | |||||
} else { | } else { | ||||
if (h->pps.sps_id != pps->sps_id || | |||||
h->pps.transform_8x8_mode != pps->transform_8x8_mode || | |||||
(h->setup_finished && h->dequant_coeff_pps != pps_id)) { | |||||
if (h->ps.pps->sps_id != pps->sps_id || | |||||
h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*|| | |||||
(h->setup_finished && h->ps.pps != pps)*/) { | |||||
av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n"); | av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n"); | ||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
} | } | ||||
if (pps->sps_id != h->sps.sps_id || | |||||
pps->sps_id != h->current_sps_id || | |||||
h->sps_buffers[pps->sps_id]->new) { | |||||
if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data || | |||||
pps->sps_id != h->current_sps_id) { | |||||
if (!first_slice) { | if (!first_slice) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
@@ -1261,22 +1189,27 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
h->sps = *h->sps_buffers[h->pps.sps_id]; | |||||
if (h->mb_width != h->sps.mb_width || | |||||
h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) || | |||||
h->cur_bit_depth_luma != h->sps.bit_depth_luma || | |||||
h->cur_chroma_format_idc != h->sps.chroma_format_idc | |||||
av_buffer_unref(&h->ps.sps_ref); | |||||
h->ps.sps = NULL; | |||||
h->ps.sps_ref = av_buffer_ref(h->ps.sps_list[h->ps.pps->sps_id]); | |||||
if (!h->ps.sps_ref) | |||||
return AVERROR(ENOMEM); | |||||
h->ps.sps = (const SPS*)h->ps.sps_ref->data; | |||||
if (h->mb_width != h->ps.sps->mb_width || | |||||
h->mb_height != h->ps.sps->mb_height * (2 - h->ps.sps->frame_mbs_only_flag) || | |||||
h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma || | |||||
h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc | |||||
) | ) | ||||
needs_reinit = 1; | needs_reinit = 1; | ||||
if (h->bit_depth_luma != h->sps.bit_depth_luma || | |||||
h->chroma_format_idc != h->sps.chroma_format_idc) | |||||
if (h->bit_depth_luma != h->ps.sps->bit_depth_luma || | |||||
h->chroma_format_idc != h->ps.sps->chroma_format_idc) | |||||
needs_reinit = 1; | needs_reinit = 1; | ||||
if (h->flags & AV_CODEC_FLAG_LOW_DELAY || | if (h->flags & AV_CODEC_FLAG_LOW_DELAY || | ||||
(h->sps.bitstream_restriction_flag && | |||||
!h->sps.num_reorder_frames)) { | |||||
(h->ps.sps->bitstream_restriction_flag && | |||||
!h->ps.sps->num_reorder_frames)) { | |||||
if (h->avctx->has_b_frames > 1 || h->delayed_pic[0]) | if (h->avctx->has_b_frames > 1 || h->delayed_pic[0]) | ||||
av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. " | av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. " | ||||
"Reenabling low delay requires a codec flush.\n"); | "Reenabling low delay requires a codec flush.\n"); | ||||
@@ -1289,34 +1222,37 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
} | } | ||||
pps = h->ps.pps; | |||||
sps = h->ps.sps; | |||||
must_reinit = (h->context_initialized && | must_reinit = (h->context_initialized && | ||||
( 16*h->sps.mb_width != h->avctx->coded_width | |||||
|| 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height | |||||
|| h->cur_bit_depth_luma != h->sps.bit_depth_luma | |||||
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc | |||||
|| h->mb_width != h->sps.mb_width | |||||
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) | |||||
( 16*sps->mb_width != h->avctx->coded_width | |||||
|| 16*sps->mb_height * (2 - sps->frame_mbs_only_flag) != h->avctx->coded_height | |||||
|| h->cur_bit_depth_luma != sps->bit_depth_luma | |||||
|| h->cur_chroma_format_idc != sps->chroma_format_idc | |||||
|| h->mb_width != sps->mb_width | |||||
|| h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag) | |||||
)); | )); | ||||
if (h->avctx->pix_fmt == AV_PIX_FMT_NONE | if (h->avctx->pix_fmt == AV_PIX_FMT_NONE | ||||
|| (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0)))) | || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0)))) | ||||
must_reinit = 1; | must_reinit = 1; | ||||
if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)) | |||||
if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio)) | |||||
must_reinit = 1; | must_reinit = 1; | ||||
if (!h->setup_finished) { | if (!h->setup_finished) { | ||||
h->avctx->profile = ff_h264_get_profile(&h->sps); | |||||
h->avctx->level = h->sps.level_idc; | |||||
h->avctx->refs = h->sps.ref_frame_count; | |||||
h->avctx->profile = ff_h264_get_profile(sps); | |||||
h->avctx->level = sps->level_idc; | |||||
h->avctx->refs = sps->ref_frame_count; | |||||
h->mb_width = h->sps.mb_width; | |||||
h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); | |||||
h->mb_width = sps->mb_width; | |||||
h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag); | |||||
h->mb_num = h->mb_width * h->mb_height; | h->mb_num = h->mb_width * h->mb_height; | ||||
h->mb_stride = h->mb_width + 1; | h->mb_stride = h->mb_width + 1; | ||||
h->b_stride = h->mb_width * 4; | h->b_stride = h->mb_width * 4; | ||||
h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p | |||||
h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p | |||||
h->width = 16 * h->mb_width; | h->width = 16 * h->mb_width; | ||||
h->height = 16 * h->mb_height; | h->height = 16 * h->mb_height; | ||||
@@ -1325,15 +1261,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
if (ret < 0) | if (ret < 0) | ||||
return ret; | return ret; | ||||
if (h->sps.video_signal_type_present_flag) { | |||||
h->avctx->color_range = h->sps.full_range>0 ? AVCOL_RANGE_JPEG | |||||
if (sps->video_signal_type_present_flag) { | |||||
h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG | |||||
: AVCOL_RANGE_MPEG; | : AVCOL_RANGE_MPEG; | ||||
if (h->sps.colour_description_present_flag) { | |||||
if (h->avctx->colorspace != h->sps.colorspace) | |||||
if (sps->colour_description_present_flag) { | |||||
if (h->avctx->colorspace != sps->colorspace) | |||||
needs_reinit = 1; | needs_reinit = 1; | ||||
h->avctx->color_primaries = h->sps.color_primaries; | |||||
h->avctx->color_trc = h->sps.color_trc; | |||||
h->avctx->colorspace = h->sps.colorspace; | |||||
h->avctx->color_primaries = sps->color_primaries; | |||||
h->avctx->color_trc = sps->color_trc; | |||||
h->avctx->colorspace = sps->colorspace; | |||||
} | } | ||||
} | } | ||||
} | } | ||||
@@ -1386,12 +1322,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
} | } | ||||
} | } | ||||
if (!h->current_slice && h->dequant_coeff_pps != pps_id) { | |||||
h->dequant_coeff_pps = pps_id; | |||||
ff_h264_init_dequant_tables(h); | |||||
} | |||||
frame_num = get_bits(&sl->gb, h->sps.log2_max_frame_num); | |||||
frame_num = get_bits(&sl->gb, sps->log2_max_frame_num); | |||||
if (!first_slice) { | if (!first_slice) { | ||||
if (h->frame_num != frame_num) { | if (h->frame_num != frame_num) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n", | av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n", | ||||
@@ -1410,10 +1341,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
last_pic_droppable = h->droppable; | last_pic_droppable = h->droppable; | ||||
droppable = h->nal_ref_idc == 0; | droppable = h->nal_ref_idc == 0; | ||||
if (h->sps.frame_mbs_only_flag) { | |||||
if (sps->frame_mbs_only_flag) { | |||||
picture_structure = PICT_FRAME; | picture_structure = PICT_FRAME; | ||||
} else { | } else { | ||||
if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) { | |||||
if (!h->ps.sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) { | |||||
av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); | av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); | ||||
return -1; | return -1; | ||||
} | } | ||||
@@ -1424,7 +1355,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
picture_structure = PICT_TOP_FIELD + bottom_field_flag; | picture_structure = PICT_TOP_FIELD + bottom_field_flag; | ||||
} else { | } else { | ||||
picture_structure = PICT_FRAME; | picture_structure = PICT_FRAME; | ||||
mb_aff_frame = h->sps.mb_aff; | |||||
mb_aff_frame = sps->mb_aff; | |||||
} | } | ||||
} | } | ||||
@@ -1456,13 +1387,13 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
* frames just to throw them away */ | * frames just to throw them away */ | ||||
if (h->frame_num != h->prev_frame_num) { | if (h->frame_num != h->prev_frame_num) { | ||||
int unwrap_prev_frame_num = h->prev_frame_num; | int unwrap_prev_frame_num = h->prev_frame_num; | ||||
int max_frame_num = 1 << h->sps.log2_max_frame_num; | |||||
int max_frame_num = 1 << sps->log2_max_frame_num; | |||||
if (unwrap_prev_frame_num > h->frame_num) | if (unwrap_prev_frame_num > h->frame_num) | ||||
unwrap_prev_frame_num -= max_frame_num; | unwrap_prev_frame_num -= max_frame_num; | ||||
if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) { | |||||
unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1; | |||||
if ((h->frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) { | |||||
unwrap_prev_frame_num = (h->frame_num - sps->ref_frame_count) - 1; | |||||
if (unwrap_prev_frame_num < 0) | if (unwrap_prev_frame_num < 0) | ||||
unwrap_prev_frame_num += max_frame_num; | unwrap_prev_frame_num += max_frame_num; | ||||
@@ -1528,11 +1459,11 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
} | } | ||||
while (h->frame_num != h->prev_frame_num && !h->first_field && | while (h->frame_num != h->prev_frame_num && !h->first_field && | ||||
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { | |||||
h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) { | |||||
H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; | H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; | ||||
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", | av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", | ||||
h->frame_num, h->prev_frame_num); | h->frame_num, h->prev_frame_num); | ||||
if (!h->sps.gaps_in_frame_num_allowed_flag) | |||||
if (!sps->gaps_in_frame_num_allowed_flag) | |||||
for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++) | for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++) | ||||
h->last_pocs[i] = INT_MIN; | h->last_pocs[i] = INT_MIN; | ||||
ret = h264_frame_start(h); | ret = h264_frame_start(h); | ||||
@@ -1542,9 +1473,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
} | } | ||||
h->prev_frame_num++; | h->prev_frame_num++; | ||||
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; | |||||
h->prev_frame_num %= 1 << sps->log2_max_frame_num; | |||||
h->cur_pic_ptr->frame_num = h->prev_frame_num; | h->cur_pic_ptr->frame_num = h->prev_frame_num; | ||||
h->cur_pic_ptr->invalid_gap = !h->sps.gaps_in_frame_num_allowed_flag; | |||||
h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag; | |||||
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); | ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); | ||||
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); | ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); | ||||
ret = ff_generate_sliding_window_mmcos(h, 1); | ret = ff_generate_sliding_window_mmcos(h, 1); | ||||
@@ -1647,35 +1578,35 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
if (h->picture_structure == PICT_FRAME) { | if (h->picture_structure == PICT_FRAME) { | ||||
h->curr_pic_num = h->frame_num; | h->curr_pic_num = h->frame_num; | ||||
h->max_pic_num = 1 << h->sps.log2_max_frame_num; | |||||
h->max_pic_num = 1 << sps->log2_max_frame_num; | |||||
} else { | } else { | ||||
h->curr_pic_num = 2 * h->frame_num + 1; | h->curr_pic_num = 2 * h->frame_num + 1; | ||||
h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1); | |||||
h->max_pic_num = 1 << (sps->log2_max_frame_num + 1); | |||||
} | } | ||||
if (h->nal_unit_type == NAL_IDR_SLICE) | if (h->nal_unit_type == NAL_IDR_SLICE) | ||||
get_ue_golomb_long(&sl->gb); /* idr_pic_id */ | get_ue_golomb_long(&sl->gb); /* idr_pic_id */ | ||||
if (h->sps.poc_type == 0) { | |||||
int poc_lsb = get_bits(&sl->gb, h->sps.log2_max_poc_lsb); | |||||
if (sps->poc_type == 0) { | |||||
int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); | |||||
if (!h->setup_finished) | if (!h->setup_finished) | ||||
h->poc_lsb = poc_lsb; | h->poc_lsb = poc_lsb; | ||||
if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) { | |||||
if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { | |||||
int delta_poc_bottom = get_se_golomb(&sl->gb); | int delta_poc_bottom = get_se_golomb(&sl->gb); | ||||
if (!h->setup_finished) | if (!h->setup_finished) | ||||
h->delta_poc_bottom = delta_poc_bottom; | h->delta_poc_bottom = delta_poc_bottom; | ||||
} | } | ||||
} | } | ||||
if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { | |||||
if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { | |||||
int delta_poc = get_se_golomb(&sl->gb); | int delta_poc = get_se_golomb(&sl->gb); | ||||
if (!h->setup_finished) | if (!h->setup_finished) | ||||
h->delta_poc[0] = delta_poc; | h->delta_poc[0] = delta_poc; | ||||
if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) { | |||||
if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { | |||||
delta_poc = get_se_golomb(&sl->gb); | delta_poc = get_se_golomb(&sl->gb); | ||||
if (!h->setup_finished) | if (!h->setup_finished) | ||||
@@ -1686,14 +1617,14 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
if (!h->setup_finished) | if (!h->setup_finished) | ||||
ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); | ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); | ||||
if (h->pps.redundant_pic_cnt_present) | |||||
if (pps->redundant_pic_cnt_present) | |||||
sl->redundant_pic_count = get_ue_golomb(&sl->gb); | sl->redundant_pic_count = get_ue_golomb(&sl->gb); | ||||
if (sl->slice_type_nos == AV_PICTURE_TYPE_B) | if (sl->slice_type_nos == AV_PICTURE_TYPE_B) | ||||
sl->direct_spatial_mv_pred = get_bits1(&sl->gb); | sl->direct_spatial_mv_pred = get_bits1(&sl->gb); | ||||
ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count, | ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count, | ||||
&sl->gb, &h->pps, sl->slice_type_nos, | |||||
&sl->gb, pps, sl->slice_type_nos, | |||||
h->picture_structure, h->avctx); | h->picture_structure, h->avctx); | ||||
if (ret < 0) | if (ret < 0) | ||||
return ret; | return ret; | ||||
@@ -1706,12 +1637,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
} | } | ||||
} | } | ||||
if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || | |||||
(h->pps.weighted_bipred_idc == 1 && | |||||
if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || | |||||
(pps->weighted_bipred_idc == 1 && | |||||
sl->slice_type_nos == AV_PICTURE_TYPE_B)) | sl->slice_type_nos == AV_PICTURE_TYPE_B)) | ||||
ff_h264_pred_weight_table(&sl->gb, &h->sps, sl->ref_count, | |||||
ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count, | |||||
sl->slice_type_nos, &sl->pwt); | sl->slice_type_nos, &sl->pwt); | ||||
else if (h->pps.weighted_bipred_idc == 2 && | |||||
else if (pps->weighted_bipred_idc == 2 && | |||||
sl->slice_type_nos == AV_PICTURE_TYPE_B) { | sl->slice_type_nos == AV_PICTURE_TYPE_B) { | ||||
implicit_weight_table(h, sl, -1); | implicit_weight_table(h, sl, -1); | ||||
} else { | } else { | ||||
@@ -1738,7 +1669,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
if (FRAME_MBAFF(h)) { | if (FRAME_MBAFF(h)) { | ||||
ff_h264_fill_mbaff_ref_list(h, sl); | ff_h264_fill_mbaff_ref_list(h, sl); | ||||
if (h->pps.weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) { | |||||
if (pps->weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) { | |||||
implicit_weight_table(h, sl, 0); | implicit_weight_table(h, sl, 0); | ||||
implicit_weight_table(h, sl, 1); | implicit_weight_table(h, sl, 1); | ||||
} | } | ||||
@@ -1748,7 +1679,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
ff_h264_direct_dist_scale_factor(h, sl); | ff_h264_direct_dist_scale_factor(h, sl); | ||||
ff_h264_direct_ref_list_init(h, sl); | ff_h264_direct_ref_list_init(h, sl); | ||||
if (sl->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { | |||||
if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { | |||||
tmp = get_ue_golomb_31(&sl->gb); | tmp = get_ue_golomb_31(&sl->gb); | ||||
if (tmp > 2) { | if (tmp > 2) { | ||||
av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp); | av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp); | ||||
@@ -1758,8 +1689,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
} | } | ||||
sl->last_qscale_diff = 0; | sl->last_qscale_diff = 0; | ||||
tmp = h->pps.init_qp + get_se_golomb(&sl->gb); | |||||
if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { | |||||
tmp = pps->init_qp + get_se_golomb(&sl->gb); | |||||
if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) { | |||||
av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); | av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); | ||||
return AVERROR_INVALIDDATA; | return AVERROR_INVALIDDATA; | ||||
} | } | ||||
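The range check above caps the slice QP at 51 + 6*(bit_depth_luma - 8), the same qp_bd_offset already used when parsing pic_init_qp earlier in this diff. As a quick illustration:

#include <stdio.h>

/* Maximum allowed luma QP for a given bit depth: 51 for 8-bit,
 * 63 for 10-bit, 75 for 12-bit. */
static int max_qp(int bit_depth_luma)
{
    return 51 + 6 * (bit_depth_luma - 8);
}

int main(void)
{
    for (int bd = 8; bd <= 12; bd += 2)
        printf("bit depth %d -> max QP %d\n", bd, max_qp(bd));
    return 0;
}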
@@ -1776,7 +1707,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
sl->deblocking_filter = 1; | sl->deblocking_filter = 1; | ||||
sl->slice_alpha_c0_offset = 0; | sl->slice_alpha_c0_offset = 0; | ||||
sl->slice_beta_offset = 0; | sl->slice_beta_offset = 0; | ||||
if (h->pps.deblocking_filter_parameters_present) { | |||||
if (pps->deblocking_filter_parameters_present) { | |||||
tmp = get_ue_golomb_31(&sl->gb); | tmp = get_ue_golomb_31(&sl->gb); | ||||
if (tmp > 2) { | if (tmp > 2) { | ||||
av_log(h->avctx, AV_LOG_ERROR, | av_log(h->avctx, AV_LOG_ERROR, | ||||
@@ -1838,9 +1769,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
sl->qp_thresh = 15 - | sl->qp_thresh = 15 - | ||||
FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) - | FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) - | ||||
FFMAX3(0, | FFMAX3(0, | ||||
h->pps.chroma_qp_index_offset[0], | |||||
h->pps.chroma_qp_index_offset[1]) + | |||||
6 * (h->sps.bit_depth_luma - 8); | |||||
pps->chroma_qp_index_offset[0], | |||||
pps->chroma_qp_index_offset[1]) + | |||||
6 * (sps->bit_depth_luma - 8); | |||||
sl->slice_num = ++h->current_slice; | sl->slice_num = ++h->current_slice; | ||||
@@ -1887,9 +1818,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) | |||||
} | } | ||||
h->au_pps_id = pps_id; | h->au_pps_id = pps_id; | ||||
h->sps.new = | |||||
h->sps_buffers[h->pps.sps_id]->new = 0; | |||||
h->current_sps_id = h->pps.sps_id; | |||||
h->current_sps_id = h->ps.pps->sps_id; | |||||
if (h->avctx->debug & FF_DEBUG_PICT_INFO) { | if (h->avctx->debug & FF_DEBUG_PICT_INFO) { | ||||
av_log(h->avctx, AV_LOG_DEBUG, | av_log(h->avctx, AV_LOG_DEBUG, | ||||
@@ -2120,7 +2049,7 @@ static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb | |||||
/* CAVLC 8x8dct requires NNZ values for residual decoding that differ | /* CAVLC 8x8dct requires NNZ values for residual decoding that differ | ||||
* from what the loop filter needs */ | * from what the loop filter needs */ | ||||
if (!CABAC(h) && h->pps.transform_8x8_mode) { | |||||
if (!CABAC(h) && h->ps.pps->transform_8x8_mode) { | |||||
if (IS_8x8DCT(top_type)) { | if (IS_8x8DCT(top_type)) { | ||||
nnz_cache[4 + 8 * 0] = | nnz_cache[4 + 8 * 0] = | ||||
nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12; | nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12; | ||||
@@ -2321,7 +2250,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) | |||||
} | } | ||||
} | } | ||||
if (h->pps.cabac) { | |||||
if (h->ps.pps->cabac) { | |||||
/* realign */ | /* realign */ | ||||
align_get_bits(&sl->gb); | align_get_bits(&sl->gb); | ||||
@@ -228,6 +228,8 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx, | |||||
{ | { | ||||
H264Context * const h = avctx->priv_data; | H264Context * const h = avctx->priv_data; | ||||
FFVAContext * const vactx = ff_vaapi_get_context(avctx); | FFVAContext * const vactx = ff_vaapi_get_context(avctx); | ||||
const PPS *pps = h->ps.pps; | |||||
const SPS *sps = h->ps.sps; | |||||
VAPictureParameterBufferH264 *pic_param; | VAPictureParameterBufferH264 *pic_param; | ||||
VAIQMatrixBufferH264 *iq_matrix; | VAIQMatrixBufferH264 *iq_matrix; | ||||
@@ -244,38 +246,38 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx, | |||||
return -1; | return -1; | ||||
pic_param->picture_width_in_mbs_minus1 = h->mb_width - 1; | pic_param->picture_width_in_mbs_minus1 = h->mb_width - 1; | ||||
pic_param->picture_height_in_mbs_minus1 = h->mb_height - 1; | pic_param->picture_height_in_mbs_minus1 = h->mb_height - 1; | ||||
pic_param->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8; | |||||
pic_param->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8; | |||||
pic_param->num_ref_frames = h->sps.ref_frame_count; | |||||
pic_param->bit_depth_luma_minus8 = sps->bit_depth_luma - 8; | |||||
pic_param->bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8; | |||||
pic_param->num_ref_frames = sps->ref_frame_count; | |||||
pic_param->seq_fields.value = 0; /* reset all bits */ | pic_param->seq_fields.value = 0; /* reset all bits */ | ||||
pic_param->seq_fields.bits.chroma_format_idc = h->sps.chroma_format_idc; | |||||
pic_param->seq_fields.bits.residual_colour_transform_flag = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */ | |||||
pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag; | |||||
pic_param->seq_fields.bits.frame_mbs_only_flag = h->sps.frame_mbs_only_flag; | |||||
pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = h->sps.mb_aff; | |||||
pic_param->seq_fields.bits.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag; | |||||
pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = h->sps.level_idc >= 31; /* A.3.3.2 */ | |||||
pic_param->seq_fields.bits.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; | |||||
pic_param->seq_fields.bits.pic_order_cnt_type = h->sps.poc_type; | |||||
pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4; | |||||
pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; | |||||
pic_param->num_slice_groups_minus1 = h->pps.slice_group_count - 1; | |||||
pic_param->slice_group_map_type = h->pps.mb_slice_group_map_type; | |||||
pic_param->seq_fields.bits.chroma_format_idc = sps->chroma_format_idc; | |||||
pic_param->seq_fields.bits.residual_colour_transform_flag = sps->residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */ | |||||
pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = sps->gaps_in_frame_num_allowed_flag; | |||||
pic_param->seq_fields.bits.frame_mbs_only_flag = sps->frame_mbs_only_flag; | |||||
pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = sps->mb_aff; | |||||
pic_param->seq_fields.bits.direct_8x8_inference_flag = sps->direct_8x8_inference_flag; | |||||
pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = sps->level_idc >= 31; /* A.3.3.2 */ | |||||
pic_param->seq_fields.bits.log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4; | |||||
pic_param->seq_fields.bits.pic_order_cnt_type = sps->poc_type; | |||||
pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4; | |||||
pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag; | |||||
pic_param->num_slice_groups_minus1 = pps->slice_group_count - 1; | |||||
pic_param->slice_group_map_type = pps->mb_slice_group_map_type; | |||||
pic_param->slice_group_change_rate_minus1 = 0; /* XXX: unimplemented in FFmpeg */ | pic_param->slice_group_change_rate_minus1 = 0; /* XXX: unimplemented in FFmpeg */ | ||||
pic_param->pic_init_qp_minus26 = h->pps.init_qp - 26; | |||||
pic_param->pic_init_qs_minus26 = h->pps.init_qs - 26; | |||||
pic_param->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; | |||||
pic_param->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; | |||||
pic_param->pic_init_qp_minus26 = pps->init_qp - 26; | |||||
pic_param->pic_init_qs_minus26 = pps->init_qs - 26; | |||||
pic_param->chroma_qp_index_offset = pps->chroma_qp_index_offset[0]; | |||||
pic_param->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1]; | |||||
pic_param->pic_fields.value = 0; /* reset all bits */ | pic_param->pic_fields.value = 0; /* reset all bits */ | ||||
pic_param->pic_fields.bits.entropy_coding_mode_flag = h->pps.cabac; | |||||
pic_param->pic_fields.bits.weighted_pred_flag = h->pps.weighted_pred; | |||||
pic_param->pic_fields.bits.weighted_bipred_idc = h->pps.weighted_bipred_idc; | |||||
pic_param->pic_fields.bits.transform_8x8_mode_flag = h->pps.transform_8x8_mode; | |||||
pic_param->pic_fields.bits.entropy_coding_mode_flag = pps->cabac; | |||||
pic_param->pic_fields.bits.weighted_pred_flag = pps->weighted_pred; | |||||
pic_param->pic_fields.bits.weighted_bipred_idc = pps->weighted_bipred_idc; | |||||
pic_param->pic_fields.bits.transform_8x8_mode_flag = pps->transform_8x8_mode; | |||||
pic_param->pic_fields.bits.field_pic_flag = h->picture_structure != PICT_FRAME; | pic_param->pic_fields.bits.field_pic_flag = h->picture_structure != PICT_FRAME; | ||||
pic_param->pic_fields.bits.constrained_intra_pred_flag = h->pps.constrained_intra_pred; | |||||
pic_param->pic_fields.bits.pic_order_present_flag = h->pps.pic_order_present; | |||||
pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; | |||||
pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present; | |||||
pic_param->pic_fields.bits.constrained_intra_pred_flag = pps->constrained_intra_pred; | |||||
pic_param->pic_fields.bits.pic_order_present_flag = pps->pic_order_present; | |||||
pic_param->pic_fields.bits.deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present; | |||||
pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = pps->redundant_pic_cnt_present; | |||||
pic_param->pic_fields.bits.reference_pic_flag = h->nal_ref_idc != 0; | pic_param->pic_fields.bits.reference_pic_flag = h->nal_ref_idc != 0; | ||||
pic_param->frame_num = h->frame_num; | pic_param->frame_num = h->frame_num; | ||||
@@ -283,9 +285,9 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx, | |||||
iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264)); | iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264)); | ||||
if (!iq_matrix) | if (!iq_matrix) | ||||
return -1; | return -1; | ||||
memcpy(iq_matrix->ScalingList4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->ScalingList4x4)); | |||||
memcpy(iq_matrix->ScalingList8x8[0], h->pps.scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0])); | |||||
memcpy(iq_matrix->ScalingList8x8[1], h->pps.scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0])); | |||||
memcpy(iq_matrix->ScalingList4x4, pps->scaling_matrix4, sizeof(iq_matrix->ScalingList4x4)); | |||||
memcpy(iq_matrix->ScalingList8x8[0], pps->scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0])); | |||||
memcpy(iq_matrix->ScalingList8x8[1], pps->scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0])); | |||||
return 0; | return 0; | ||||
} | } | ||||
@@ -337,7 +339,7 @@ static int vaapi_h264_decode_slice(AVCodecContext *avctx, | |||||
slice_param->num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0; | slice_param->num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0; | ||||
slice_param->num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0; | slice_param->num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0; | ||||
slice_param->cabac_init_idc = sl->cabac_init_idc; | slice_param->cabac_init_idc = sl->cabac_init_idc; | ||||
slice_param->slice_qp_delta = sl->qscale - h->pps.init_qp; | |||||
slice_param->slice_qp_delta = sl->qscale - h->ps.pps->init_qp; | |||||
slice_param->disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter; | slice_param->disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter; | ||||
slice_param->slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2; | slice_param->slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2; | ||||
slice_param->slice_beta_offset_div2 = sl->slice_beta_offset / 2; | slice_param->slice_beta_offset_div2 = sl->slice_beta_offset / 2; | ||||
@@ -476,30 +476,30 @@ void ff_vdpau_h264_picture_complete(H264Context *h) | |||||
     render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
     render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
     render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
-    render->info.h264.num_ref_frames = h->sps.ref_frame_count;
-    render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag;
-    render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred;
-    render->info.h264.weighted_pred_flag = h->pps.weighted_pred;
-    render->info.h264.weighted_bipred_idc = h->pps.weighted_bipred_idc;
-    render->info.h264.frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
-    render->info.h264.transform_8x8_mode_flag = h->pps.transform_8x8_mode;
-    render->info.h264.chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
-    render->info.h264.second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
-    render->info.h264.pic_init_qp_minus26 = h->pps.init_qp - 26;
-    render->info.h264.num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1;
-    render->info.h264.num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1;
-    render->info.h264.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
-    render->info.h264.pic_order_cnt_type = h->sps.poc_type;
-    render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
-    render->info.h264.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
-    render->info.h264.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
-    render->info.h264.entropy_coding_mode_flag = h->pps.cabac;
-    render->info.h264.pic_order_present_flag = h->pps.pic_order_present;
-    render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    render->info.h264.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
-    memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
-    memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
-    memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
+    render->info.h264.num_ref_frames = h->ps.sps->ref_frame_count;
+    render->info.h264.mb_adaptive_frame_field_flag = h->ps.sps->mb_aff && !render->info.h264.field_pic_flag;
+    render->info.h264.constrained_intra_pred_flag = h->ps.pps->constrained_intra_pred;
+    render->info.h264.weighted_pred_flag = h->ps.pps->weighted_pred;
+    render->info.h264.weighted_bipred_idc = h->ps.pps->weighted_bipred_idc;
+    render->info.h264.frame_mbs_only_flag = h->ps.sps->frame_mbs_only_flag;
+    render->info.h264.transform_8x8_mode_flag = h->ps.pps->transform_8x8_mode;
+    render->info.h264.chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[0];
+    render->info.h264.second_chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[1];
+    render->info.h264.pic_init_qp_minus26 = h->ps.pps->init_qp - 26;
+    render->info.h264.num_ref_idx_l0_active_minus1 = h->ps.pps->ref_count[0] - 1;
+    render->info.h264.num_ref_idx_l1_active_minus1 = h->ps.pps->ref_count[1] - 1;
+    render->info.h264.log2_max_frame_num_minus4 = h->ps.sps->log2_max_frame_num - 4;
+    render->info.h264.pic_order_cnt_type = h->ps.sps->poc_type;
+    render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->ps.sps->poc_type ? 0 : h->ps.sps->log2_max_poc_lsb - 4;
+    render->info.h264.delta_pic_order_always_zero_flag = h->ps.sps->delta_pic_order_always_zero_flag;
+    render->info.h264.direct_8x8_inference_flag = h->ps.sps->direct_8x8_inference_flag;
+    render->info.h264.entropy_coding_mode_flag = h->ps.pps->cabac;
+    render->info.h264.pic_order_present_flag = h->ps.pps->pic_order_present;
+    render->info.h264.deblocking_filter_control_present_flag = h->ps.pps->deblocking_filter_parameters_present;
+    render->info.h264.redundant_pic_cnt_present_flag = h->ps.pps->redundant_pic_cnt_present;
+    memcpy(render->info.h264.scaling_lists_4x4, h->ps.pps->scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
+    memcpy(render->info.h264.scaling_lists_8x8[0], h->ps.pps->scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
+    memcpy(render->info.h264.scaling_lists_8x8[1], h->ps.pps->scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
     ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height);
     render->bitstream_buffers_used = 0;
@@ -120,6 +120,8 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
 {
     H264Context * const h = avctx->priv_data;
+    const PPS *pps = h->ps.pps;
+    const SPS *sps = h->ps.sps;
     H264Picture *pic = h->cur_pic_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     VdpPictureInfoH264 *info = &pic_ctx->info.h264;
@@ -135,37 +137,37 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
     info->frame_num = h->frame_num;
     info->field_pic_flag = h->picture_structure != PICT_FRAME;
     info->bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
-    info->num_ref_frames = h->sps.ref_frame_count;
-    info->mb_adaptive_frame_field_flag = h->sps.mb_aff && !info->field_pic_flag;
-    info->constrained_intra_pred_flag = h->pps.constrained_intra_pred;
-    info->weighted_pred_flag = h->pps.weighted_pred;
-    info->weighted_bipred_idc = h->pps.weighted_bipred_idc;
-    info->frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
-    info->transform_8x8_mode_flag = h->pps.transform_8x8_mode;
-    info->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
-    info->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
-    info->pic_init_qp_minus26 = h->pps.init_qp - 26;
-    info->num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1;
-    info->num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1;
-    info->log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
-    info->pic_order_cnt_type = h->sps.poc_type;
-    info->log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
-    info->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
-    info->direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
+    info->num_ref_frames = sps->ref_frame_count;
+    info->mb_adaptive_frame_field_flag = sps->mb_aff && !info->field_pic_flag;
+    info->constrained_intra_pred_flag = pps->constrained_intra_pred;
+    info->weighted_pred_flag = pps->weighted_pred;
+    info->weighted_bipred_idc = pps->weighted_bipred_idc;
+    info->frame_mbs_only_flag = sps->frame_mbs_only_flag;
+    info->transform_8x8_mode_flag = pps->transform_8x8_mode;
+    info->chroma_qp_index_offset = pps->chroma_qp_index_offset[0];
+    info->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1];
+    info->pic_init_qp_minus26 = pps->init_qp - 26;
+    info->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1;
+    info->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1;
+    info->log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4;
+    info->pic_order_cnt_type = sps->poc_type;
+    info->log2_max_pic_order_cnt_lsb_minus4 = sps->poc_type ? 0 : sps->log2_max_poc_lsb - 4;
+    info->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
+    info->direct_8x8_inference_flag = sps->direct_8x8_inference_flag;
 #ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
-    info2->qpprime_y_zero_transform_bypass_flag = h->sps.transform_bypass;
-    info2->separate_colour_plane_flag = h->sps.residual_color_transform_flag;
+    info2->qpprime_y_zero_transform_bypass_flag = sps->transform_bypass;
+    info2->separate_colour_plane_flag = sps->residual_color_transform_flag;
 #endif
-    info->entropy_coding_mode_flag = h->pps.cabac;
-    info->pic_order_present_flag = h->pps.pic_order_present;
-    info->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    info->redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
+    info->entropy_coding_mode_flag = pps->cabac;
+    info->pic_order_present_flag = pps->pic_order_present;
+    info->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+    info->redundant_pic_cnt_present_flag = pps->redundant_pic_cnt_present;
-    memcpy(info->scaling_lists_4x4, h->pps.scaling_matrix4,
+    memcpy(info->scaling_lists_4x4, pps->scaling_matrix4,
            sizeof(info->scaling_lists_4x4));
-    memcpy(info->scaling_lists_8x8[0], h->pps.scaling_matrix8[0],
+    memcpy(info->scaling_lists_8x8[0], pps->scaling_matrix8[0],
            sizeof(info->scaling_lists_8x8[0]));
-    memcpy(info->scaling_lists_8x8[1], h->pps.scaling_matrix8[3],
+    memcpy(info->scaling_lists_8x8[1], pps->scaling_matrix8[3],
            sizeof(info->scaling_lists_8x8[1]));
     vdpau_h264_set_reference_frames(avctx);
@@ -84,7 +84,7 @@ CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
     H264Context *h = avctx->priv_data;
     CFDataRef data = NULL;
     uint8_t *p;
-    int vt_extradata_size = 6 + 3 + h->sps.data_size + 4 + h->pps.data_size;
+    int vt_extradata_size = 6 + 3 + h->ps.sps->data_size + 4 + h->ps.pps->data_size;
     uint8_t *vt_extradata = av_malloc(vt_extradata_size);
     if (!vt_extradata)
         return NULL;
@@ -92,15 +92,15 @@ CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
     p = vt_extradata;
     AV_W8(p + 0, 1); /* version */
-    AV_W8(p + 1, h->sps.data[0]); /* profile */
-    AV_W8(p + 2, h->sps.data[1]); /* profile compat */
-    AV_W8(p + 3, h->sps.data[2]); /* level */
+    AV_W8(p + 1, h->ps.sps->data[0]); /* profile */
+    AV_W8(p + 2, h->ps.sps->data[1]); /* profile compat */
+    AV_W8(p + 3, h->ps.sps->data[2]); /* level */
     AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
     AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
-    AV_WB16(p + 6, h->sps.data_size + 1);
+    AV_WB16(p + 6, h->ps.sps->data_size + 1);
     AV_W8(p + 8, NAL_SPS | (3 << 5)); // NAL unit header
-    memcpy(p + 9, h->sps.data, h->sps.data_size);
-    p += 9 + h->sps.data_size;
+    memcpy(p + 9, h->ps.sps->data, h->ps.sps->data_size);
+    p += 9 + h->ps.sps->data_size;
     AV_W8(p + 0, 1); /* number of pps */
     AV_WB16(p + 1, h->pps.data_size + 1);
     AV_W8(p + 3, NAL_PPS | (3 << 5)); // NAL unit header
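As a reading aid for the size arithmetic in ff_videotoolbox_avcc_extradata_create(): the constants 6, 3 and 4 correspond to the avcC writes in the hunk above, namely 6 header bytes (version, profile, profile compat, level, 0xff, 0xe1), then a 16-bit SPS length plus a 1-byte NAL header before the SPS payload, then a 1-byte PPS count plus a 16-bit PPS length plus a 1-byte NAL header before the PPS payload (the PPS payload copy itself falls outside the lines shown). A small helper restating that arithmetic; the helper name is hypothetical, and SPS/PPS are libavcodec's internal parameter-set structs:

    /* avcC size for exactly one SPS and one PPS, mirroring the writes above:
     *   6 bytes: version, profile, profile compat, level, 0xff, 0xe1
     *   3 bytes: 16-bit SPS length + SPS NAL header, followed by sps->data_size payload bytes
     *   4 bytes: PPS count + 16-bit PPS length + PPS NAL header, followed by pps->data_size payload bytes
     */
    static int avcc_extradata_size(const SPS *sps, const PPS *pps)
    {
        return 6 + 3 + sps->data_size + 4 + pps->data_size;
    }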