Originally committed as revision 21104 to svn://svn.ffmpeg.org/ffmpeg/trunktags/v0.6
@@ -341,104 +341,6 @@ void ff_clean_h263_qscales(MpegEncContext *s){ | |||||
#endif //CONFIG_ENCODERS | #endif //CONFIG_ENCODERS | ||||
#define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0])) | |||||
#define tab_bias (tab_size/2) | |||||
//used by mpeg4 and rv10 decoder | |||||
/**
 * Precompute the direct-mode MV scaling tables from the current
 * pp_time/pb_time temporal distances. Used by the MPEG-4 and RV10 decoders.
 */
void ff_mpeg4_init_direct_mv(MpegEncContext *s){
    int idx;

    for (idx = 0; idx < tab_size; idx++) {
        const int mv = idx - tab_bias; /* unbias the table index */

        /* forward MV scaled by pb/pp, backward MV scaled by (pb - pp)/pp */
        s->direct_scale_mv[0][idx] = mv *  s->pb_time               / s->pp_time;
        s->direct_scale_mv[1][idx] = mv * (s->pb_time - s->pp_time) / s->pp_time;
    }
}
/**
 * Derive the direct-mode forward/backward MV pair for one 8x8 block.
 * Uses the precomputed scale tables when the co-located MV fits in them,
 * otherwise falls back to the explicit divide.
 */
static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, int i){
    const int      xy      = s->block_index[i];
    const uint16_t time_pp = s->pp_time;
    const uint16_t time_pb = s->pb_time;
    const int      p_mx    = s->next_picture.motion_val[0][xy][0];
    const int      p_my    = s->next_picture.motion_val[0][xy][1];
    const int      idx_x   = p_mx + tab_bias; /* biased table index, x */
    const int      idx_y   = p_my + tab_bias; /* biased table index, y */

    /* horizontal component */
    if ((unsigned)idx_x < tab_size) {
        s->mv[0][i][0] = s->direct_scale_mv[0][idx_x] + mx;
        s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                            : s->direct_scale_mv[1][idx_x];
    } else {
        s->mv[0][i][0] = p_mx * time_pb / time_pp + mx;
        s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                            : p_mx * (time_pb - time_pp) / time_pp;
    }

    /* vertical component */
    if ((unsigned)idx_y < tab_size) {
        s->mv[0][i][1] = s->direct_scale_mv[0][idx_y] + my;
        s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
                            : s->direct_scale_mv[1][idx_y];
    } else {
        s->mv[0][i][1] = p_my * time_pb / time_pp + my;
        s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
                            : p_my * (time_pb - time_pp) / time_pp;
    }
}
#undef tab_size | |||||
#undef tab_bias | |||||
/** | |||||
* | |||||
* @return the mb_type | |||||
*/ | |||||
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){ | |||||
const int mb_index= s->mb_x + s->mb_y*s->mb_stride; | |||||
const int colocated_mb_type= s->next_picture.mb_type[mb_index]; | |||||
uint16_t time_pp; | |||||
uint16_t time_pb; | |||||
int i; | |||||
//FIXME avoid divides | |||||
// try special case with shifts for 1 and 3 B-frames? | |||||
if(IS_8X8(colocated_mb_type)){ | |||||
s->mv_type = MV_TYPE_8X8; | |||||
for(i=0; i<4; i++){ | |||||
ff_mpeg4_set_one_direct_mv(s, mx, my, i); | |||||
} | |||||
return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1; | |||||
} else if(IS_INTERLACED(colocated_mb_type)){ | |||||
s->mv_type = MV_TYPE_FIELD; | |||||
for(i=0; i<2; i++){ | |||||
int field_select= s->next_picture.ref_index[0][s->block_index[2*i]]; | |||||
s->field_select[0][i]= field_select; | |||||
s->field_select[1][i]= i; | |||||
if(s->top_field_first){ | |||||
time_pp= s->pp_field_time - field_select + i; | |||||
time_pb= s->pb_field_time - field_select + i; | |||||
}else{ | |||||
time_pp= s->pp_field_time + field_select - i; | |||||
time_pb= s->pb_field_time + field_select - i; | |||||
} | |||||
s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx; | |||||
s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my; | |||||
s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0] | |||||
: s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp; | |||||
s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1] | |||||
: s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp; | |||||
} | |||||
return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED; | |||||
}else{ | |||||
ff_mpeg4_set_one_direct_mv(s, mx, my, 0); | |||||
s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->mv[0][0][0]; | |||||
s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->mv[0][0][1]; | |||||
s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = s->mv[1][0][0]; | |||||
s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = s->mv[1][0][1]; | |||||
if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample) | |||||
s->mv_type= MV_TYPE_16X16; | |||||
else | |||||
s->mv_type= MV_TYPE_8X8; | |||||
return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1; //Note see prev line | |||||
} | |||||
} | |||||
void ff_h263_update_motion_val(MpegEncContext * s){ | void ff_h263_update_motion_val(MpegEncContext * s){ | ||||
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; | const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; | ||||
//FIXME a lot of that is only needed for !low_delay | //FIXME a lot of that is only needed for !low_delay | ||||
@@ -70,3 +70,102 @@ void ff_mpeg4_clean_buffers(MpegEncContext *s) | |||||
s->last_mv[1][0][0]= | s->last_mv[1][0][0]= | ||||
s->last_mv[1][0][1]= 0; | s->last_mv[1][0][1]= 0; | ||||
} | } | ||||
#define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0])) | |||||
#define tab_bias (tab_size/2) | |||||
//used by mpeg4 and rv10 decoder | |||||
/**
 * Precompute the direct-mode MV scaling tables from the current
 * pp_time/pb_time temporal distances. Used by the MPEG-4 and RV10 decoders.
 */
void ff_mpeg4_init_direct_mv(MpegEncContext *s){
    int idx;

    for (idx = 0; idx < tab_size; idx++) {
        const int mv = idx - tab_bias; /* unbias the table index */

        /* forward MV scaled by pb/pp, backward MV scaled by (pb - pp)/pp */
        s->direct_scale_mv[0][idx] = mv *  s->pb_time               / s->pp_time;
        s->direct_scale_mv[1][idx] = mv * (s->pb_time - s->pp_time) / s->pp_time;
    }
}
/**
 * Derive the direct-mode forward/backward MV pair for one 8x8 block.
 * Uses the precomputed scale tables when the co-located MV fits in them,
 * otherwise falls back to the explicit divide.
 */
static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, int i){
    const int      xy      = s->block_index[i];
    const uint16_t time_pp = s->pp_time;
    const uint16_t time_pb = s->pb_time;
    const int      p_mx    = s->next_picture.motion_val[0][xy][0];
    const int      p_my    = s->next_picture.motion_val[0][xy][1];
    const int      idx_x   = p_mx + tab_bias; /* biased table index, x */
    const int      idx_y   = p_my + tab_bias; /* biased table index, y */

    /* horizontal component */
    if ((unsigned)idx_x < tab_size) {
        s->mv[0][i][0] = s->direct_scale_mv[0][idx_x] + mx;
        s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                            : s->direct_scale_mv[1][idx_x];
    } else {
        s->mv[0][i][0] = p_mx * time_pb / time_pp + mx;
        s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                            : p_mx * (time_pb - time_pp) / time_pp;
    }

    /* vertical component */
    if ((unsigned)idx_y < tab_size) {
        s->mv[0][i][1] = s->direct_scale_mv[0][idx_y] + my;
        s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
                            : s->direct_scale_mv[1][idx_y];
    } else {
        s->mv[0][i][1] = p_my * time_pb / time_pp + my;
        s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
                            : p_my * (time_pb - time_pp) / time_pp;
    }
}
#undef tab_size | |||||
#undef tab_bias | |||||
/** | |||||
* | |||||
* @return the mb_type | |||||
*/ | |||||
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){ | |||||
const int mb_index= s->mb_x + s->mb_y*s->mb_stride; | |||||
const int colocated_mb_type= s->next_picture.mb_type[mb_index]; | |||||
uint16_t time_pp; | |||||
uint16_t time_pb; | |||||
int i; | |||||
//FIXME avoid divides | |||||
// try special case with shifts for 1 and 3 B-frames? | |||||
if(IS_8X8(colocated_mb_type)){ | |||||
s->mv_type = MV_TYPE_8X8; | |||||
for(i=0; i<4; i++){ | |||||
ff_mpeg4_set_one_direct_mv(s, mx, my, i); | |||||
} | |||||
return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1; | |||||
} else if(IS_INTERLACED(colocated_mb_type)){ | |||||
s->mv_type = MV_TYPE_FIELD; | |||||
for(i=0; i<2; i++){ | |||||
int field_select= s->next_picture.ref_index[0][s->block_index[2*i]]; | |||||
s->field_select[0][i]= field_select; | |||||
s->field_select[1][i]= i; | |||||
if(s->top_field_first){ | |||||
time_pp= s->pp_field_time - field_select + i; | |||||
time_pb= s->pb_field_time - field_select + i; | |||||
}else{ | |||||
time_pp= s->pp_field_time + field_select - i; | |||||
time_pb= s->pb_field_time + field_select - i; | |||||
} | |||||
s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx; | |||||
s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my; | |||||
s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0] | |||||
: s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp; | |||||
s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1] | |||||
: s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp; | |||||
} | |||||
return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED; | |||||
}else{ | |||||
ff_mpeg4_set_one_direct_mv(s, mx, my, 0); | |||||
s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->mv[0][0][0]; | |||||
s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->mv[0][0][1]; | |||||
s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = s->mv[1][0][0]; | |||||
s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = s->mv[1][0][1]; | |||||
if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample) | |||||
s->mv_type= MV_TYPE_16X16; | |||||
else | |||||
s->mv_type= MV_TYPE_8X8; | |||||
return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1; //Note see prev line | |||||
} | |||||
} | |||||
@@ -104,6 +104,13 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s); | |||||
int ff_mpeg4_decode_partitions(MpegEncContext *s); | int ff_mpeg4_decode_partitions(MpegEncContext *s); | ||||
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s); | int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s); | ||||
int mpeg4_decode_video_packet_header(MpegEncContext *s); | int mpeg4_decode_video_packet_header(MpegEncContext *s); | ||||
void ff_mpeg4_init_direct_mv(MpegEncContext *s); | |||||
/** | |||||
* | |||||
* @return the mb_type | |||||
*/ | |||||
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my); | |||||
extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3]; | extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3]; | ||||
@@ -840,8 +840,6 @@ void ff_clean_h263_qscales(MpegEncContext *s); | |||||
int ff_h263_resync(MpegEncContext *s); | int ff_h263_resync(MpegEncContext *s); | ||||
const uint8_t *ff_h263_find_resync_marker(const uint8_t *p, const uint8_t *end); | const uint8_t *ff_h263_find_resync_marker(const uint8_t *p, const uint8_t *end); | ||||
int ff_h263_get_gob_height(MpegEncContext *s); | int ff_h263_get_gob_height(MpegEncContext *s); | ||||
void ff_mpeg4_init_direct_mv(MpegEncContext *s); | |||||
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my); | |||||
void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code); | void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code); | ||||
void ff_init_qscale_tab(MpegEncContext *s); | void ff_init_qscale_tab(MpegEncContext *s); | ||||