adding AVVideoFrame
moving quality, pict_type, key_frame, qscale_table, ... to AVVideoFrame
removing obsolete variables in AVCodecContext
skipping of MBs in b frames
correctly initializing AVCodecContext
picture buffer cleanup

Originally committed as revision 1302 to svn://svn.ffmpeg.org/ffmpeg/trunk
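The API change in a nutshell: per-frame state (quality, pict_type, key_frame, pts, qscale_table, ...) moves off AVCodecContext onto the new AVVideoFrame struct, which is what avcodec_encode_video() and avcodec_decode_video() now take. A minimal encode-side sketch, distilled from the ffmpeg.c hunks below (the identifiers enc, pic and outbuf are placeholders; error handling omitted):

    AVVideoFrame frame;
    int out_size;

    memset(&frame, 0, sizeof(AVVideoFrame));
    *(AVPicture*)&frame = pic;   /* AVVideoFrame starts with the AVPicture layout: data[4], linesize[4] */
    frame.quality = 5;           /* per-frame quantizer, 1 (good) .. 31 (bad), now a float */
    out_size = avcodec_encode_video(enc, outbuf, sizeof(outbuf), &frame);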
@@ -285,6 +285,7 @@ int read_ffserver_streams(AVFormatContext *s, const char *filename)
     s->nb_streams = ic->nb_streams;
     for(i=0;i<ic->nb_streams;i++) {
         AVStream *st;
+
         st = av_mallocz(sizeof(AVFormatContext));
         memcpy(st, ic->streams[i], sizeof(AVStream));
         s->streams[i] = st;
@@ -605,15 +606,21 @@ static void do_video_out(AVFormatContext *s,
     /* XXX: pb because no interleaving */
     for(i=0;i<nb_frames;i++) {
         if (enc->codec_id != CODEC_ID_RAWVIDEO) {
+            AVVideoFrame big_picture;
+
+            memset(&big_picture, 0, sizeof(AVVideoFrame));
+            *(AVPicture*)&big_picture= *picture;
+
             /* handles sameq here. This is not correct because it may
                not be a global option */
             if (same_quality) {
-                enc->quality = dec->quality;
-            }
+                big_picture.quality = ist->st->quality;
+            }else
+                big_picture.quality = ost->st->quality;
+
             ret = avcodec_encode_video(enc,
                                        video_buffer, VIDEO_BUFFER_SIZE,
-                                       picture);
+                                       &big_picture);
             //enc->frame_number = enc->real_pict_num;
             av_write_frame(s, ost->index, video_buffer, ret);
             *frame_size = ret;
@@ -674,7 +681,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
     total_size += frame_size;
     if (enc->codec_type == CODEC_TYPE_VIDEO) {
         frame_number = ost->frame_number;
-        fprintf(fvstats, "frame= %5d q= %2d ", frame_number, enc->quality);
+        fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_picture->quality);
         if (do_psnr)
             fprintf(fvstats, "PSNR= %6.2f ", enc->psnr_y);

@@ -688,7 +695,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
     avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0;
     fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)total_size / 1024, ti1, bitrate, avg_bitrate);
-    fprintf(fvstats,"type= %s\n", enc->key_frame == 1 ? "I" : "P");
+    fprintf(fvstats,"type= %s\n", enc->coded_picture->key_frame == 1 ? "I" : "P");
 }
 }
@@ -731,13 +738,13 @@ void print_report(AVFormatContext **output_files,
         os = output_files[ost->file_index];
         enc = &ost->st->codec;
         if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
-            sprintf(buf + strlen(buf), "q=%2d ",
-                    enc->quality);
+            sprintf(buf + strlen(buf), "q=%2.1f ",
+                    enc->coded_picture->quality);
         }
         if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
             frame_number = ost->frame_number;
-            sprintf(buf + strlen(buf), "frame=%5d q=%2d ",
-                    frame_number, enc->quality);
+            sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
+                    frame_number, enc->coded_picture ? enc->coded_picture->quality : 0);
             if (do_psnr)
                 sprintf(buf + strlen(buf), "PSNR=%6.2f ", enc->psnr_y);
             vid = 1;
@@ -1236,9 +1243,13 @@ static int av_encode(AVFormatContext **output_files,
                                     ist->st->codec.height);
                         ret = len;
                     } else {
+                        AVVideoFrame big_picture;
+
                         data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
                         ret = avcodec_decode_video(&ist->st->codec,
-                                                   &picture, &got_picture, ptr, len);
+                                                   &big_picture, &got_picture, ptr, len);
+                        picture= *(AVPicture*)&big_picture;
+                        ist->st->quality= big_picture.quality;
                         if (ret < 0) {
                         fail_decode:
                             fprintf(stderr, "Error while decoding stream #%d.%d\n",
@@ -2046,6 +2057,7 @@ void opt_output_file(const char *filename)
             fprintf(stderr, "Could not alloc stream\n");
             exit(1);
         }
+        avcodec_get_context_defaults(&st->codec);

         video_enc = &st->codec;
         if (video_stream_copy) {

@@ -2074,7 +2086,7 @@ void opt_output_file(const char *filename)
                 video_enc->gop_size = 0;
             if (video_qscale || same_quality) {
                 video_enc->flags |= CODEC_FLAG_QSCALE;
-                video_enc->quality = video_qscale;
+                st->quality = video_qscale;
             }

             if (use_hq) {
@@ -2181,6 +2193,7 @@ void opt_output_file(const char *filename)
             fprintf(stderr, "Could not alloc stream\n");
             exit(1);
         }
+        avcodec_get_context_defaults(&st->codec);

         audio_enc = &st->codec;
         audio_enc->codec_type = CODEC_TYPE_AUDIO;
@@ -1955,7 +1955,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
                 /* we use the codec indication because it is
                    more accurate than the demux flags */
                 pkt->flags = 0;
-                if (st->codec.key_frame)
+                if (st->codec.coded_picture->key_frame)
                     pkt->flags |= PKT_FLAG_KEY;
                 return 0;
             }
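A consumer of the keyframe flag set above might look like this (a sketch, assuming ic is an opened AVFormatContext):

    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        if (pkt.flags & PKT_FLAG_KEY) {
            /* packet starts a keyframe: a safe seek/cut point */
        }
    }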
@@ -3942,7 +3942,7 @@ int parse_ffconfig(const char *filename)
         } else if (!strcasecmp(cmd, "AudioQuality")) {
             get_arg(arg, sizeof(arg), &p);
             if (stream) {
-                audio_enc.quality = atof(arg) * 1000;
+//                audio_enc.quality = atof(arg) * 1000;
             }
         } else if (!strcasecmp(cmd, "VideoBitRateRange")) {
             if (stream) {
@@ -5,8 +5,8 @@

 #define LIBAVCODEC_VERSION_INT 0x000406
 #define LIBAVCODEC_VERSION     "0.4.6"
-#define LIBAVCODEC_BUILD       4640
-#define LIBAVCODEC_BUILD_STR   "4640"
+#define LIBAVCODEC_BUILD       4641
+#define LIBAVCODEC_BUILD_STR   "4641"

 enum CodecID {
     CODEC_ID_NONE,
@@ -140,7 +140,6 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,
 #define CODEC_FLAG_EXTERN_HUFF 0x1000 /* use external huffman table (for mjpeg) */
 #define CODEC_FLAG_GRAY  0x2000 /* only decode/encode grayscale */
 #define CODEC_FLAG_EMU_EDGE 0x4000/* dont draw edges */
-#define CODEC_FLAG_DR1    0x8000 /* direct renderig type 1 (store internal frames in external buffers) */
 #define CODEC_FLAG_TRUNCATED  0x00010000 /* input bitstream might be truncated at a random location instead
                                             of only at frame boundaries */
 #define CODEC_FLAG_NORMALIZE_AQP  0x00020000 /* normalize adaptive quantization */
@@ -159,6 +158,111 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,

 #define FRAME_RATE_BASE 10000

+#define FF_COMMON_PICTURE \
+    uint8_t *data[4];\
+    int linesize[4];\
+    /**\
+     * pointer to the first allocated byte of the picture. can be used in get_buffer/release_buffer\
+     * this isnt used by lavc unless the default get/release_buffer() is used\
+     * encoding: \
+     * decoding: \
+     */\
+    uint8_t *base[4];\
+    /**\
+     * 1 -> keyframe, 0-> not\
+     * encoding: set by lavc\
+     * decoding: set by lavc\
+     */\
+    int key_frame;\
+\
+    /**\
+     * picture type of the frame, see ?_TYPE below\
+     * encoding: set by lavc for coded_picture (and set by user for input)\
+     * decoding: set by lavc\
+     */\
+    int pict_type;\
+\
+    /**\
+     * presentation timestamp in micro seconds (time when frame should be shown to user)\
+     * if 0 then the frame_rate will be used as reference\
+     * encoding: MUST be set by user\
+     * decoding: set by lavc\
+     */\
+    long long int pts;\
+\
+    /**\
+     * picture number in bitstream order.\
+     * encoding: set by\
+     * decoding: set by lavc\
+     */\
+    int coded_picture_number;\
+    /**\
+     * encoding: set by\
+     * decoding: set by lavc\
+     * picture number in display order.\
+     */\
+    int display_picture_number;\
+\
+    /**\
+     * quality (between 1 (good) and 31 (bad)) \
+     * encoding: set by lavc for coded_picture (and set by user for input)\
+     * decoding: set by lavc\
+     */\
+    float quality; \
+\
+    /**\
+     * buffer age (1->was last buffer and dint change, 2->..., ...).\
+     * set to something large if the buffer has not been used yet \
+     * encoding: unused\
+     * decoding: MUST be set by get_buffer()\
+     */\
+    int age;\
+\
+    /**\
+     * is this picture used as reference\
+     * encoding: unused\
+     * decoding: set by lavc (before get_buffer() call))\
+     */\
+    int reference;\
+\
+    /**\
+     * QP table\
+     * encoding: unused\
+     * decoding: set by lavc\
+     */\
+    int8_t *qscale_table;\
+    /**\
+     * QP store stride\
+     * encoding: unused\
+     * decoding: set by lavc\
+     */\
+    int qstride;\
+\
+    /**\
+     * mbskip_table[mb]>=1 if MB didnt change\
+     * stride= mb_width = (width+15)>>4\
+     * encoding: unused\
+     * decoding: set by lavc\
+     */\
+    uint8_t *mbskip_table;\
+\
+    /**\
+     * for some private data of the user\
+     * encoding: unused\
+     * decoding: set by user\
+     */\
+    void *opaque;\
+
+/* FIXME: these should have FF_ */
+#define I_TYPE 1 // Intra
+#define P_TYPE 2 // Predicted
+#define B_TYPE 3 // Bi-dir predicted
+#define S_TYPE 4 // S(GMC)-VOP MPEG4
+
+typedef struct AVVideoFrame {
+    FF_COMMON_PICTURE
+} AVVideoFrame;
+
 typedef struct AVCodecContext {
     /**
      * the average bitrate
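On the decode side the same struct comes back out of the codec. A sketch of the new calling convention (dec is an opened AVCodecContext; inbuf/inbuf_size hold one frame's worth of bitstream):

    AVVideoFrame frame;
    int got_picture, len;

    memset(&frame, 0, sizeof(AVVideoFrame));
    len = avcodec_decode_video(dec, &frame, &got_picture, inbuf, inbuf_size);
    if (len >= 0 && got_picture) {
        /* per-frame metadata now travels with the frame, not the context */
        printf("pict_type=%d key_frame=%d quality=%.1f pts=%lld\n",
               frame.pict_type, frame.key_frame, frame.quality, frame.pts);
    }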
@@ -191,7 +295,7 @@ typedef struct AVCodecContext {
     /**
      * motion estimation algorithm used for video coding
-     * encoding: set by user.
+     * encoding: MUST be set by user.
      * decoding: unused
      */
     int me_method;
@@ -212,21 +316,17 @@ typedef struct AVCodecContext {
      * frames per sec multiplied by FRAME_RATE_BASE
      * for variable fps this is the precission, so if the timestamps
      * can be specified in msec precssion then this is 1000*FRAME_RATE_BASE
-     * encoding: set by user
+     * encoding: MUST be set by user
      * decoding: set by lavc. 0 or the frame_rate if available
      */
     int frame_rate;

     /**
-     * encoding: set by user.
+     * encoding: MUST be set by user.
      * decoding: set by user, some codecs might override / change it during playback
      */
     int width, height;

-    /**
-     * Obsolete, will be removed
-     */
-    int aspect_ratio_info;
 #define FF_ASPECT_SQUARE 1
 #define FF_ASPECT_4_3_625 2
 #define FF_ASPECT_4_3_525 3
@@ -274,26 +374,14 @@ typedef struct AVCodecContext {
     int frame_number;   /* audio or video frame number */
     int real_pict_num;  /* returns the real picture number of
                            previous encoded frame */

     /**
-     * 1 -> keyframe, 0-> not
+     * 1 -> keyframe, 0-> not (this if for audio only, for video, AVVideoFrame.key_frame should be used)
      * encoding: set by lavc (for the outputed bitstream, not the input frame)
      * decoding: set by lavc (for the decoded bitstream, not the displayed frame)
      */
     int key_frame;

-    /**
-     * picture type of the previous en/decoded frame, see ?_TYPE below
-     * encoding: set by lavc (for the outputed bitstream, not the input frame)
-     * decoding: set by lavc (for the decoded bitstream, not the displayed frame)
-     */
-    int pict_type;
-
-/* FIXME: these should have FF_ */
-#define I_TYPE 1 // Intra
-#define P_TYPE 2 // Predicted
-#define B_TYPE 3 // Bi-dir predicted
-#define S_TYPE 4 // S(GMC)-VOP MPEG4

     /**
      * number of frames the decoded output will be delayed relative to
      * the encoded input
@@ -301,25 +389,8 @@ typedef struct AVCodecContext {
      * decoding: unused
      */
     int delay;

-    /**
-     * mbskip_table[mb]=1 if MB didnt change, is only valid for I/P frames
-     * stride= mb_width = (width+15)>>4 (FIXME export stride?)
-     * encoding: unused
-     * decoding: set by lavc
-     */
-    uint8_t *mbskip_table;
-
     /* encoding parameters */
-    /**
-     * quality (between 1 (good) and 31 (bad))
-     * encoding: set by user if CODEC_FLAG_QSCALE is set otherwise set by lavc
-     * decoding: set by lavc
-     */
-    int quality;     /* quality of the previous encoded frame
-                        this is allso used to set the quality in vbr mode
-                        and the per frame quality in CODEC_FLAG_TYPE (second pass mode) */
     float qcompress; /* amount of qscale change between easy & hard scenes (0.0-1.0)*/
     float qblur;     /* amount of qscale smoothing over time (0.0-1.0) */
@@ -485,46 +556,21 @@ typedef struct AVCodecContext {
     int error_resilience;

     /**
-     * obsolete, just here to keep ABI compatible (should be removed perhaps, dunno)
-     */
-    int *quant_store;
-
-    /**
-     * QP store stride
-     * encoding: unused
-     * decoding: set by lavc
-     */
-    int qstride;
-
-    /**
-     * buffer, where the next picture should be decoded into
+     * called at the beginning of each frame to get a buffer for it.
+     * if pic.reference is set then the frame will be read later by lavc
      * encoding: unused
-     * decoding: set by user in get_buffer_callback to a buffer into which the next part
-     *           of the bitstream will be decoded, and set by lavc at end of frame to the
-     *           next frame which needs to be displayed
+     * decoding: set by lavc, user can override
      */
-    uint8_t *dr_buffer[3];
+    int (*get_buffer)(struct AVCodecContext *c, AVVideoFrame *pic);

     /**
-     * stride of the luminance part of the dr buffer
+     * called to release buffers which where allocated with get_buffer.
+     * a released buffer can be reused in get_buffer()
+     * pic.data[*] must be set to NULL
      * encoding: unused
-     * decoding: set by user
+     * decoding: set by lavc, user can override
      */
-    int dr_stride;
-
-    /**
-     * same behavior as dr_buffer, just for some private data of the user
-     * encoding: unused
-     * decoding: set by user in get_buffer_callback, and set by lavc at end of frame
-     */
-    void *dr_opaque_frame;
-
-    /**
-     * called at the beginning of each frame to get a buffer for it
-     * encoding: unused
-     * decoding: set by user
-     */
-    int (*get_buffer_callback)(struct AVCodecContext *c, int width, int height, int pict_type);
+    void (*release_buffer)(struct AVCodecContext *c, AVVideoFrame *pic);

     /**
      * is 1 if the decoded stream contains b frames, 0 otherwise
@@ -532,20 +578,6 @@ typedef struct AVCodecContext {
      * decoding: set by lavc
      */
     int has_b_frames;
-
-    /**
-     * stride of the chrominance part of the dr buffer
-     * encoding: unused
-     * decoding: set by user
-     */
-    int dr_uvstride;
-
-    /**
-     * number of dr buffers
-     * encoding: unused
-     * decoding: set by user
-     */
-    int dr_ip_buffer_count;

     int block_align; /* used by some WAV based audio codecs */
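The dr_buffer/get_buffer_callback machinery is thus replaced by a symmetric get_buffer()/release_buffer() pair that lavc fills with defaults and the user may override. A pass-through override might look like this (a sketch; a real direct-rendering implementation would hand out its own surfaces and set data[], linesize[] and age itself):

    static int my_get_buffer(AVCodecContext *c, AVVideoFrame *pic)
    {
        if (pic->reference) {
            /* lavc will read this frame again later, so it must stay valid */
        }
        return avcodec_default_get_buffer(c, pic);
    }

    static void my_release_buffer(AVCodecContext *c, AVVideoFrame *pic)
    {
        avcodec_default_release_buffer(c, pic); /* leaves pic->data[*] set to NULL, as required */
    }

installed before avcodec_open() with avctx->get_buffer= my_get_buffer; avctx->release_buffer= my_release_buffer;.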
@@ -646,12 +678,6 @@ typedef struct AVCodecContext {
      */
     float rc_initial_cplx;

-    /**
-     * Obsolete, will be removed
-     */
-    int aspected_width;
-    int aspected_height;
-
     /**
      * dct algorithm, see FF_DCT_* below
      * encoding: set by user
@@ -664,14 +690,6 @@ typedef struct AVCodecContext {
 #define FF_DCT_MMX     3
 #define FF_DCT_MLIB    4
 #define FF_DCT_ALTIVEC 5
-
-    /**
-     * presentation timestamp in micro seconds (time when frame should be shown to user)
-     * if 0 then the frame_rate will be used as reference
-     * encoding: set by user
-     * decoding; set by lavc
-     */
-    long long int pts;

     /**
      * luminance masking (0-> disabled)
@@ -754,24 +772,6 @@ typedef struct AVCodecContext {
 #define FF_EC_GUESS_MVS   1
 #define FF_EC_DEBLOCK     2

-    /**
-     * QP table of the currently decoded frame
-     * encoding; unused
-     * decoding: set by lavc
-     */
-    int8_t *current_qscale_table;
-    /**
-     * QP table of the currently displayed frame
-     * encoding; unused
-     * decoding: set by lavc
-     */
-    int8_t *display_qscale_table;
-    /**
-     * force specific pict_type.
-     * encoding; set by user (I/P/B_TYPE)
-     * decoding: unused
-     */
-    int force_type;
     /**
      * dsp_mask could be used to disable unwanted
      * CPU features (i.e. MMX, SSE. ...)
@@ -780,14 +780,14 @@ typedef struct AVCodecContext {
     /**
      * bits per sample/pixel from the demuxer (needed for huffyuv)
-     * encoding; set by lavc
+     * encoding: set by lavc
      * decoding: set by user
      */
     int bits_per_sample;

     /**
      * prediction method (needed for huffyuv)
-     * encoding; set by user
+     * encoding: set by user
      * decoding: unused
      */
     int prediction_method;
@@ -801,6 +801,13 @@ typedef struct AVCodecContext {
      * decoding: set by lavc.
      */
     float aspect_ratio;
+
+    /**
+     * the picture in the bitstream
+     * encoding: set by lavc
+     * decoding: set by lavc
+     */
+    AVVideoFrame *coded_picture;
 } AVCodecContext;

 typedef struct AVCodec {
@@ -928,6 +935,7 @@ void img_resample_close(ImgReSampleContext *s);
 void avpicture_fill(AVPicture *picture, UINT8 *ptr,
                     int pix_fmt, int width, int height);
 int avpicture_get_size(int pix_fmt, int width, int height);
+void avcodec_get_chroma_sub_sample(int fmt, int *h_shift, int *v_shift);

 /* convert among pixel formats */
 int img_convert(AVPicture *dst, int dst_pix_fmt,
@@ -957,12 +965,18 @@ AVCodec *avcodec_find_decoder(enum CodecID id);
 AVCodec *avcodec_find_decoder_by_name(const char *name);
 void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);

+void avcodec_get_context_defaults(AVCodecContext *s);
 AVCodecContext *avcodec_alloc_context(void);
+AVVideoFrame *avcodec_alloc_picture(void);
+
+int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic);
+void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic);
+
 int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
 int avcodec_decode_audio(AVCodecContext *avctx, INT16 *samples,
                          int *frame_size_ptr,
                          UINT8 *buf, int buf_size);
-int avcodec_decode_video(AVCodecContext *avctx, AVPicture *picture,
+int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
                          int *got_picture_ptr,
                          UINT8 *buf, int buf_size);
 int avcodec_parse_frame(AVCodecContext *avctx, UINT8 **pdata,

@@ -971,7 +985,7 @@ int avcodec_parse_frame(AVCodecContext *avctx, UINT8 **pdata,
 int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
                          const short *samples);
 int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
-                         const AVPicture *pict);
+                         const AVVideoFrame *pict);

 int avcodec_close(AVCodecContext *avctx);
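Note how the new allocators pair with the defaults helper. A sketch of the intended setup (whether avcodec_alloc_context() applies the defaults internally is an assumption here; stack-allocated contexts, as in the opt_output_file() hunks above, clearly need the explicit call):

    AVCodecContext ctx;
    AVVideoFrame *pic;

    avcodec_get_context_defaults(&ctx);   /* installs get_buffer/release_buffer defaults etc. */
    pic = avcodec_alloc_picture();        /* heap AVVideoFrame, presumably zero-initialized */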
@@ -33,6 +33,7 @@ typedef struct DVVideoDecodeContext {
     int sampling_411; /* 0 = 420, 1 = 411 */
     int width, height;
     UINT8 *current_picture[3]; /* picture structure */
+    AVVideoFrame picture;
     int linesize[3];
     DCTELEM block[5*6][64] __align8;
     UINT8 dv_zigzag[2][64];

@@ -128,7 +129,7 @@ static int dvvideo_decode_init(AVCodecContext *avctx)
     /* XXX: do it only for constant case */
     dv_build_unquantize_tables(s);

     return 0;
 }
@@ -499,7 +500,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
     unsigned size;
     UINT8 *buf_ptr;
     const UINT16 *mb_pos_ptr;
-    AVPicture *picture;

     /* parse id */
     init_get_bits(&s->gb, buf, buf_size);
@@ -561,45 +561,20 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
     avctx->width = width;
     avctx->height = height;

-    if (avctx->flags & CODEC_FLAG_DR1)
-    {
-        s->width = -1;
-        avctx->dr_buffer[0] = avctx->dr_buffer[1] = avctx->dr_buffer[2] = 0;
-        if(avctx->get_buffer_callback(avctx, width, height, I_TYPE) < 0
-           && avctx->flags & CODEC_FLAG_DR1) {
-            fprintf(stderr, "get_buffer() failed\n");
-            return -1;
-        }
+    s->picture.reference= 0;
+    if(avctx->get_buffer(avctx, &s->picture) < 0) {
+        fprintf(stderr, "get_buffer() failed\n");
+        return -1;
     }

-    /* (re)alloc picture if needed */
-    if (s->width != width || s->height != height) {
-        if (!(avctx->flags & CODEC_FLAG_DR1))
-            for(i=0;i<3;i++) {
-                if (avctx->dr_buffer[i] != s->current_picture[i])
-                    av_freep(&s->current_picture[i]);
-                avctx->dr_buffer[i] = 0;
-            }
-        for(i=0;i<3;i++) {
-            if (avctx->dr_buffer[i]) {
-                s->current_picture[i] = avctx->dr_buffer[i];
-                s->linesize[i] = (i == 0) ? avctx->dr_stride : avctx->dr_uvstride;
-            } else {
-                size = width * height;
-                s->linesize[i] = width;
-                if (i >= 1) {
-                    size >>= 2;
-                    s->linesize[i] >>= s->sampling_411 ? 2 : 1;
-                }
-                s->current_picture[i] = av_malloc(size);
-            }
-            if (!s->current_picture[i])
-                return -1;
-        }
-        s->width = width;
-        s->height = height;
+    for(i=0;i<3;i++) {
+        s->current_picture[i] = s->picture.data[i];
+        s->linesize[i] = s->picture.linesize[i];
+        if (!s->current_picture[i])
+            return -1;
     }
+    s->width = width;
+    s->height = height;

     /* for each DIF segment */
     buf_ptr = buf;
@@ -620,12 +595,11 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
     emms_c();

     /* return image */
-    *data_size = sizeof(AVPicture);
-    picture = data;
-    for(i=0;i<3;i++) {
-        picture->data[i] = s->current_picture[i];
-        picture->linesize[i] = s->linesize[i];
-    }
+    *data_size = sizeof(AVVideoFrame);
+    *(AVVideoFrame*)data= s->picture;
+
+    avctx->release_buffer(avctx, &s->picture);

     return packet_size;
 }
@@ -633,10 +607,15 @@ static int dvvideo_decode_end(AVCodecContext *avctx)
 {
     DVVideoDecodeContext *s = avctx->priv_data;
     int i;

+    if(avctx->get_buffer == avcodec_default_get_buffer){
+        for(i=0; i<4; i++){
+            av_freep(&s->picture.base[i]);
+            s->picture.data[i]= NULL;
+        }
+        av_freep(&s->picture.opaque);
+    }

-    for(i=0;i<3;i++)
-        if (avctx->dr_buffer[i] != s->current_picture[i])
-            av_freep(&s->current_picture[i]);
     return 0;
 }
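The DV hunks above establish the per-frame pattern every decoder now follows; schematically:

    s->picture.reference= 0;                    /* lavc will not re-read this frame */
    if (avctx->get_buffer(avctx, &s->picture) < 0)
        return -1;
    /* ... decode into s->picture.data[0..2] ... */
    *(AVVideoFrame*)data= s->picture;           /* hand a copy of the frame struct to the caller */
    avctx->release_buffer(avctx, &s->picture);  /* buffer may now be recycled by get_buffer() */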
@@ -464,7 +464,7 @@ int score_sum=0;
             s->mb_y= mb_y;
             for(j=0; j<pred_count; j++){
                 int score=0;
-                UINT8 *src= s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
+                UINT8 *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;

                 s->motion_val[mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
                 s->motion_val[mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];

@@ -556,8 +556,8 @@ static int is_intra_more_likely(MpegEncContext *s){
             if((j%skip_amount) != 0) continue; //skip a few to speed things up

             if(s->pict_type==I_TYPE){
-                UINT8 *mb_ptr     = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
-                UINT8 *last_mb_ptr= s->last_picture   [0] + mb_x*16 + mb_y*16*s->linesize;
+                UINT8 *mb_ptr     = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+                UINT8 *last_mb_ptr= s->last_picture.data   [0] + mb_x*16 + mb_y*16*s->linesize;

                 is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr                    , s->linesize);
                 is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize);

@@ -802,9 +802,9 @@ void ff_error_resilience(MpegEncContext *s){
             if(s->mb_type[i]&MB_TYPE_INTRA) continue; //intra
//            if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?

-            dest_y = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
-            dest_cb= s->current_picture[1] + mb_x*8  + mb_y*8 *s->uvlinesize;
-            dest_cr= s->current_picture[2] + mb_x*8  + mb_y*8 *s->uvlinesize;
+            dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+            dest_cb= s->current_picture.data[1] + mb_x*8  + mb_y*8 *s->uvlinesize;
+            dest_cr= s->current_picture.data[2] + mb_x*8  + mb_y*8 *s->uvlinesize;

             dc_ptr= &s->dc_val[0][mb_x*2+1 + (mb_y*2+1)*(s->mb_width*2+2)];
             for(n=0; n<4; n++){

@@ -852,9 +852,9 @@ void ff_error_resilience(MpegEncContext *s){
             if(!(s->mb_type[i]&MB_TYPE_INTRA)) continue; //inter
             if(!(error&AC_ERROR)) continue;              //undamaged

-            dest_y = s->current_picture[0] + mb_x*16 + mb_y*16*s->linesize;
-            dest_cb= s->current_picture[1] + mb_x*8  + mb_y*8 *s->uvlinesize;
-            dest_cr= s->current_picture[2] + mb_x*8  + mb_y*8 *s->uvlinesize;
+            dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+            dest_cb= s->current_picture.data[1] + mb_x*8  + mb_y*8 *s->uvlinesize;
+            dest_cr= s->current_picture.data[2] + mb_x*8  + mb_y*8 *s->uvlinesize;

             put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
         }

@@ -863,14 +863,14 @@ void ff_error_resilience(MpegEncContext *s){
     if(s->avctx->error_concealment&FF_EC_DEBLOCK){
         /* filter horizontal block boundaries */
-        h_block_filter(s, s->current_picture[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
-        h_block_filter(s, s->current_picture[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
-        h_block_filter(s, s->current_picture[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
+        h_block_filter(s, s->current_picture.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        h_block_filter(s, s->current_picture.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);

         /* filter vertical block boundaries */
-        v_block_filter(s, s->current_picture[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
-        v_block_filter(s, s->current_picture[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
-        v_block_filter(s, s->current_picture[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
+        v_block_filter(s, s->current_picture.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        v_block_filter(s, s->current_picture.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
     }

     /* clean a few tables */
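With QP data now living on the frame instead of AVCodecContext.current_qscale_table, a caller can walk the table per macroblock. A sketch of a consumer, reusing the decoded frame from the decode example above (qstride is the row stride of the table):

    int mb_width  = (dec->width  + 15) >> 4;
    int mb_height = (dec->height + 15) >> 4;
    int mb_x, mb_y;

    for (mb_y = 0; mb_y < mb_height; mb_y++)
        for (mb_x = 0; mb_x < mb_width; mb_x++) {
            int8_t qp = frame.qscale_table[mb_x + mb_y * frame.qstride];
            /* e.g. feed a QP-aware postprocessing filter */
        }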
@@ -272,6 +272,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
 {
     int score0=0, score1=0;
     int i, n;
+    int8_t * const qscale_table= s->current_picture.qscale_table;

     for(n=0; n<6; n++){
         INT16 *ac_val, *ac_val1;

@@ -282,7 +283,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
             const int xy= s->mb_x + s->mb_y*s->mb_width - s->mb_width;
             /* top prediction */
             ac_val-= s->block_wrap[n]*16;
-            if(s->mb_y==0 || s->qscale == s->qscale_table[xy] || n==2 || n==3){
+            if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
                 /* same qscale */
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i   ]];

@@ -296,7 +297,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i   ]];
                     score0+= ABS(level);
-                    score1+= ABS(level - ROUNDED_DIV(ac_val[i + 8]*s->qscale_table[xy], s->qscale));
+                    score1+= ABS(level - ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale));
                     ac_val1[i  ]= block[n][s->idct_permutation[i<<3]];
                     ac_val1[i+8]= level;
                 }

@@ -305,7 +306,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
             const int xy= s->mb_x-1 + s->mb_y*s->mb_width;
             /* left prediction */
             ac_val-= 16;
-            if(s->mb_x==0 || s->qscale == s->qscale_table[xy] || n==1 || n==3){
+            if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
                 /* same qscale */
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i<<3]];

@@ -319,7 +320,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
                 for(i=1; i<8; i++){
                     const int level= block[n][s->idct_permutation[i<<3]];
                     score0+= ABS(level);
-                    score1+= ABS(level - ROUNDED_DIV(ac_val[i]*s->qscale_table[xy], s->qscale));
+                    score1+= ABS(level - ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale));
                     ac_val1[i  ]= level;
                     ac_val1[i+8]= block[n][s->idct_permutation[i   ]];
                 }
@@ -335,14 +336,15 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], int d
  */
 void ff_clean_h263_qscales(MpegEncContext *s){
     int i;
+    int8_t * const qscale_table= s->current_picture.qscale_table;

     for(i=1; i<s->mb_num; i++){
-        if(s->qscale_table[i] - s->qscale_table[i-1] >2)
-            s->qscale_table[i]= s->qscale_table[i-1]+2;
+        if(qscale_table[i] - qscale_table[i-1] >2)
+            qscale_table[i]= qscale_table[i-1]+2;
     }
     for(i=s->mb_num-2; i>=0; i--){
-        if(s->qscale_table[i] - s->qscale_table[i+1] >2)
-            s->qscale_table[i]= s->qscale_table[i+1]+2;
+        if(qscale_table[i] - qscale_table[i+1] >2)
+            qscale_table[i]= qscale_table[i+1]+2;
     }
 }
@@ -351,11 +353,12 @@ void ff_clean_h263_qscales(MpegEncContext *s){
  */
 void ff_clean_mpeg4_qscales(MpegEncContext *s){
     int i;
+    int8_t * const qscale_table= s->current_picture.qscale_table;

     ff_clean_h263_qscales(s);

     for(i=1; i<s->mb_num; i++){
-        if(s->qscale_table[i] != s->qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_INTER4V)){
+        if(qscale_table[i] != qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_INTER4V)){
             s->mb_type[i]&= ~MB_TYPE_INTER4V;
             s->mb_type[i]|= MB_TYPE_INTER;
         }

@@ -367,21 +370,21 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
            for the actual adaptive quantization */

         for(i=0; i<s->mb_num; i++){
-            odd += s->qscale_table[i]&1;
+            odd += qscale_table[i]&1;
         }
         if(2*odd > s->mb_num) odd=1;
         else                  odd=0;

         for(i=0; i<s->mb_num; i++){
-            if((s->qscale_table[i]&1) != odd)
-                s->qscale_table[i]++;
-            if(s->qscale_table[i] > 31)
-                s->qscale_table[i]= 31;
+            if((qscale_table[i]&1) != odd)
+                qscale_table[i]++;
+            if(qscale_table[i] > 31)
+                qscale_table[i]= 31;
         }

         for(i=1; i<s->mb_num; i++){
-            if(s->qscale_table[i] != s->qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_DIRECT)){
+            if(qscale_table[i] != qscale_table[i-1] && (s->mb_type[i]&MB_TYPE_DIRECT)){
                 s->mb_type[i]&= ~MB_TYPE_DIRECT;
                 s->mb_type[i]|= MB_TYPE_BIDIR;
             }
@@ -427,7 +430,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
         assert(mb_type>=0);

         /* nothing to do if this MB was skiped in the next P Frame */
-        if(s->mbskip_table[s->mb_y * s->mb_width + s->mb_x]){ //FIXME avoid DCT & ...
+        if(s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x]){ //FIXME avoid DCT & ...
             s->skip_count++;
             s->mv[0][0][0]=
             s->mv[0][0][1]=

@@ -435,6 +438,8 @@ void mpeg4_encode_mb(MpegEncContext * s,
             s->mv[1][0][1]= 0;
             s->mv_dir= MV_DIR_FORWARD; //doesnt matter
             s->qscale -= s->dquant;
+//            s->mb_skiped=1;
+
             return;
         }

@@ -451,6 +456,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
                 s->skip_count++;
                 return;
             }
+
             put_bits(&s->pb, 1, 0);     /* mb coded modb1=0 */
             put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ //FIXME merge
             put_bits(&s->pb, mb_type+1, 1); // this table is so simple that we dont need it :)

@@ -547,16 +553,17 @@ void mpeg4_encode_mb(MpegEncContext * s,
             if(y+16 > s->height) y= s->height-16;

             offset= x + y*s->linesize;
-            p_pic= s->new_picture[0] + offset;
+            p_pic= s->new_picture.data[0] + offset;

             s->mb_skiped=1;
             for(i=0; i<s->max_b_frames; i++){
                 uint8_t *b_pic;
                 int diff;
+                Picture *pic= s->reordered_input_picture[i+1];

-                if(s->coded_order[i+1].pict_type!=B_TYPE) break;
+                if(pic==NULL || pic->pict_type!=B_TYPE) break;

-                b_pic= s->coded_order[i+1].picture[0] + offset;
+                b_pic= pic->data[0] + offset + 16; //FIXME +16
                 diff= s->dsp.pix_abs16x16(p_pic, b_pic, s->linesize);
                 if(diff>s->qscale*70){ //FIXME check that 70 is optimal
                     s->mb_skiped=0;
@@ -1493,8 +1500,8 @@ void ff_set_mpeg4_time(MpegEncContext * s, int picture_number){
         s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
     }

-    if(s->avctx->pts)
-        s->time= (s->avctx->pts*s->time_increment_resolution + 500*1000)/(1000*1000);
+    if(s->current_picture.pts)
+        s->time= (s->current_picture.pts*s->time_increment_resolution + 500*1000)/(1000*1000);
     else
         s->time= picture_number*(INT64)FRAME_RATE_BASE*s->time_increment_resolution/s->frame_rate;
     time_div= s->time/s->time_increment_resolution;
@@ -1736,6 +1743,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
 {
     int i;
     INT16 *ac_val, *ac_val1;
+    int8_t * const qscale_table= s->current_picture.qscale_table;

     /* find prediction */
     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

@@ -1746,7 +1754,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         /* left prediction */
         ac_val -= 16;
-        if(s->mb_x==0 || s->qscale == s->qscale_table[xy] || n==1 || n==3){
+        if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i<<3]] += ac_val[i];

@@ -1754,7 +1762,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i<<3]] += ROUNDED_DIV(ac_val[i]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i<<3]] += ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
             }
         }
     } else {

@@ -1762,7 +1770,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         /* top prediction */
         ac_val -= 16 * s->block_wrap[n];
-        if(s->mb_y==0 || s->qscale == s->qscale_table[xy] || n==2 || n==3){
+        if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i]] += ac_val[i + 8];

@@ -1770,7 +1778,7 @@ void mpeg4_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
             }
         }
     }
@@ -1790,6 +1798,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
 {
     int i;
     INT16 *ac_val;
+    int8_t * const qscale_table= s->current_picture.qscale_table;

     /* find prediction */
     ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

@@ -1798,7 +1807,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
         const int xy= s->mb_x-1 + s->mb_y*s->mb_width;
         /* left prediction */
         ac_val -= 16;
-        if(s->mb_x==0 || s->qscale == s->qscale_table[xy] || n==1 || n==3){
+        if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i<<3]] -= ac_val[i];

@@ -1806,14 +1815,14 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i<<3]] -= ROUNDED_DIV(ac_val[i]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i<<3]] -= ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
             }
         }
     } else {
         const int xy= s->mb_x + s->mb_y*s->mb_width - s->mb_width;
         /* top prediction */
         ac_val -= 16 * s->block_wrap[n];
-        if(s->mb_y==0 || s->qscale == s->qscale_table[xy] || n==2 || n==3){
+        if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
             /* same qscale */
             for(i=1;i<8;i++) {
                 block[s->idct_permutation[i]] -= ac_val[i + 8];

@@ -1821,7 +1830,7 @@ static void mpeg4_inv_pred_ac(MpegEncContext * s, INT16 *block, int n,
         }else{
             /* different qscale, we must rescale */
             for(i=1;i<8;i++) {
-                block[s->idct_permutation[i]] -= ROUNDED_DIV(ac_val[i + 8]*s->qscale_table[xy], s->qscale);
+                block[s->idct_permutation[i]] -= ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
             }
         }
     }
@@ -2532,7 +2541,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
                 if(cbpc & 4) {
                     change_qscale(s, quant_tab[get_bits(&s->gb, 2)]);
                 }
-                s->qscale_table[xy]= s->qscale;
+                s->current_picture.qscale_table[xy]= s->qscale;

                 s->mbintra_table[xy]= 1;
                 for(i=0; i<6; i++){

@@ -2704,7 +2713,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
                 if(s->cbp_table[xy] & 8) {
                     change_qscale(s, quant_tab[get_bits(&s->gb, 2)]);
                 }
-                s->qscale_table[xy]= s->qscale;
+                s->current_picture.qscale_table[xy]= s->qscale;

                 for(i=0; i<6; i++){
                     int dc_pred_dir;

@@ -2721,7 +2730,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
                 s->pred_dir_table[xy]= dir | (ac_pred<<7);
                 s->error_status_table[xy]&= ~DC_ERROR;
             }else if(s->mb_type[xy]&MB_TYPE_SKIPED){
-                s->qscale_table[xy]= s->qscale;
+                s->current_picture.qscale_table[xy]= s->qscale;
                 s->cbp_table[xy]= 0;
             }else{
                 int cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);

@@ -2734,7 +2743,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
                 if(s->cbp_table[xy] & 8) {
                     change_qscale(s, quant_tab[get_bits(&s->gb, 2)]);
                 }
-                s->qscale_table[xy]= s->qscale;
+                s->current_picture.qscale_table[xy]= s->qscale;

                 s->cbp_table[xy]&= 3; //remove dquant
                 s->cbp_table[xy]|= (cbpy^0xf)<<2;

@@ -2801,8 +2810,8 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
     mb_type= s->mb_type[xy];
     cbp = s->cbp_table[xy];

-    if(s->qscale_table[xy] != s->qscale){
-        s->qscale= s->qscale_table[xy];
+    if(s->current_picture.qscale_table[xy] != s->qscale){
+        s->qscale= s->current_picture.qscale_table[xy];
         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
     }
@@ -3054,7 +3063,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
         }

         /* if we skipped it in the future P Frame than skip it now too */
-        s->mb_skiped= s->mbskip_table[s->mb_y * s->mb_width + s->mb_x]; // Note, skiptab=0 if last was GMC
+        s->mb_skiped= s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x]; // Note, skiptab=0 if last was GMC

         if(s->mb_skiped){
             /* skip mb */

@@ -3287,7 +3296,7 @@ end:
         /* per-MB end of slice check */
         if(s->codec_id==CODEC_ID_MPEG4){
             if(mpeg4_is_resync(s)){
-                if(s->pict_type==B_TYPE && s->mbskip_table[s->mb_y * s->mb_width + s->mb_x+1])
+                if(s->pict_type==B_TYPE && s->next_picture.mbskip_table[s->mb_y * s->mb_width + s->mb_x+1])
                     return SLICE_OK;
                 return SLICE_END;
             }

@@ -4441,7 +4450,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
                 - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2;
     }

-    s->avctx->pts= s->time*1000LL*1000LL / s->time_increment_resolution;
+    s->current_picture.pts= s->time*1000LL*1000LL / s->time_increment_resolution;

     if(check_marker(gb, "before vop_coded")==0 && s->picture_number==0){
         printf("hmm, seems the headers arnt complete, trying to guess time_increment_bits\n");
@@ -199,6 +199,7 @@ static int decode_slice(MpegEncContext *s){
             s->mv_dir = MV_DIR_FORWARD;
             s->mv_type = MV_TYPE_16X16;
+//            s->mb_skiped = 0;
//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
             ret= s->decode_mb(s, s->block);

@@ -347,7 +348,7 @@ static int h263_decode_frame(AVCodecContext *avctx,
 {
     MpegEncContext *s = avctx->priv_data;
     int ret,i;
-    AVPicture *pict = data;
+    AVVideoFrame *pict = data;
     float new_aspect;

 #ifdef PRINT_FRAME_TIME

@@ -357,7 +358,6 @@ uint64_t time= rdtsc();
 printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
 printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
 #endif
-
     s->flags= avctx->flags;

     *data_size = 0;
@@ -523,8 +523,9 @@ retry:
         return -1;
     }

-    s->avctx->key_frame = (s->pict_type == I_TYPE);
-    s->avctx->pict_type = s->pict_type;
+    // for hurry_up==5
+    s->current_picture.pict_type= s->pict_type;
+    s->current_picture.key_frame= s->pict_type == I_TYPE;

     /* skip b frames if we dont have reference frames */
     if(s->num_available_buffers<2 && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);

@@ -580,7 +581,9 @@ retry:
     }

     if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==I_TYPE)
-        if(msmpeg4_decode_ext_header(s, buf_size) < 0) return -1;
+        if(msmpeg4_decode_ext_header(s, buf_size) < 0){
+            s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
+        }

     /* divx 5.01+ bistream reorder stuff */
     if(s->codec_id==CODEC_ID_MPEG4 && s->bitstream_buffer_size==0 && s->divx_version>=500){
@@ -644,7 +647,7 @@ retry: | |||||
int y= mb_y*16 + 8; | int y= mb_y*16 + 8; | ||||
for(mb_x=0; mb_x<s->mb_width; mb_x++){ | for(mb_x=0; mb_x<s->mb_width; mb_x++){ | ||||
int x= mb_x*16 + 8; | int x= mb_x*16 + 8; | ||||
uint8_t *ptr= s->last_picture[0]; | |||||
uint8_t *ptr= s->last_picture.data[0]; | |||||
int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2); | int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2); | ||||
int mx= (s->motion_val[xy][0]>>1) + x; | int mx= (s->motion_val[xy][0]>>1) + x; | ||||
int my= (s->motion_val[xy][1]>>1) + y; | int my= (s->motion_val[xy][1]>>1) + y; | ||||
@@ -669,21 +672,12 @@ retry: | |||||
} | } | ||||
} | } | ||||
#endif | |||||
#endif | |||||
if(s->pict_type==B_TYPE || (!s->has_b_frames)){ | if(s->pict_type==B_TYPE || (!s->has_b_frames)){ | ||||
pict->data[0] = s->current_picture[0]; | |||||
pict->data[1] = s->current_picture[1]; | |||||
pict->data[2] = s->current_picture[2]; | |||||
*pict= *(AVVideoFrame*)&s->current_picture; | |||||
} else { | } else { | ||||
pict->data[0] = s->last_picture[0]; | |||||
pict->data[1] = s->last_picture[1]; | |||||
pict->data[2] = s->last_picture[2]; | |||||
*pict= *(AVVideoFrame*)&s->last_picture; | |||||
} | } | ||||
pict->linesize[0] = s->linesize; | |||||
pict->linesize[1] = s->uvlinesize; | |||||
pict->linesize[2] = s->uvlinesize; | |||||
avctx->quality = s->qscale; | |||||
/* Return the Picture timestamp as the frame number */ | /* Return the Picture timestamp as the frame number */ | ||||
/* we substract 1 because it is added on utils.c */ | /* we substract 1 because it is added on utils.c */ | ||||
@@ -692,7 +686,7 @@ retry: | |||||
/* dont output the last pic after seeking | /* dont output the last pic after seeking | ||||
note we allready added +1 for the current pix in MPV_frame_end(s) */ | note we allready added +1 for the current pix in MPV_frame_end(s) */ | ||||
if(s->num_available_buffers>=2 || (!s->has_b_frames)) | if(s->num_available_buffers>=2 || (!s->has_b_frames)) | ||||
*data_size = sizeof(AVPicture); | |||||
*data_size = sizeof(AVVideoFrame); | |||||
#ifdef PRINT_FRAME_TIME | #ifdef PRINT_FRAME_TIME | ||||
printf("%Ld\n", rdtsc()-time); | printf("%Ld\n", rdtsc()-time); | ||||
#endif | #endif | ||||
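
Editorial note, not part of the patch: from here on a decoder fills a complete AVVideoFrame, so per-frame metadata (pict_type, key_frame, quality, pts) travels with the pixels instead of being scraped off AVCodecContext afterwards. A minimal caller sketch; the avcodec_decode_video() signature is assumed from its uses elsewhere in this tree, and error handling is elided:

    /* Sketch only: assumes avcodec_decode_video(avctx, &frame, &got, buf, size). */
    AVVideoFrame frame;
    int got_picture;

    if (avcodec_decode_video(avctx, &frame, &got_picture, buf, buf_size) >= 0
        && got_picture) {
        /* the metadata now lives on the frame, not on the context */
        printf("key_frame=%d pict_type=%d quality=%.1f\n",
               frame.key_frame, frame.pict_type, (float)frame.quality);
    }
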
@@ -30,7 +30,7 @@
 #endif
 
 #define VLC_BITS 11
 
 typedef enum Predictor{
     LEFT= 0,
     PLANE,
@@ -52,13 +52,12 @@ typedef struct HYuvContext{
     int flags;
     int picture_number;
     int last_slice_end;
-    int linesize[3];
     uint8_t __align8 temp[3][2500];
     uint64_t stats[3][256];
     uint8_t len[3][256];
     uint32_t bits[3][256];
     VLC vlc[3];
-    uint8_t __align8 *picture[3];
+    AVVideoFrame picture;
     uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
     DSPContext dsp;
 }HYuvContext;
@@ -324,7 +323,7 @@ static int read_old_huffman_tables(HYuvContext *s){
 static int decode_init(AVCodecContext *avctx)
 {
     HYuvContext *s = avctx->priv_data;
-    int width, height, y_size, c_size, stride;
+    int width, height;
 
     s->avctx= avctx;
     s->flags= avctx->flags;
@@ -333,6 +332,8 @@ static int decode_init(AVCodecContext *avctx)
     width= s->width= avctx->width;
     height= s->height= avctx->height;
+    avctx->coded_picture= &s->picture;
+
 s->bgr32=1;
     assert(width && height);
 //if(avctx->extradata)
@@ -388,52 +389,27 @@ s->bgr32=1;
     s->interlaced= height > 288;
 
-    c_size= 0;
     switch(s->bitstream_bpp){
     case 12:
         avctx->pix_fmt = PIX_FMT_YUV420P;
-        stride= (width+15)&~15;
-        c_size= height*stride/4;
         break;
     case 16:
         if(s->yuy2){
             avctx->pix_fmt = PIX_FMT_YUV422;
-            stride= (width*2+15)&~15;
         }else{
             avctx->pix_fmt = PIX_FMT_YUV422P;
-            stride= (width+15)&~15;
-            c_size= height*stride/2;
         }
         break;
     case 24:
     case 32:
         if(s->bgr32){
             avctx->pix_fmt = PIX_FMT_BGRA32;
-            stride= (width*4+15)&~15;
         }else{
             avctx->pix_fmt = PIX_FMT_BGR24;
-            stride= (width*3+15)&~15;
         }
         break;
     default:
         assert(0);
-        stride=0; //gcc fix
-    }
-
-    y_size= height*stride;
-
-    if(!(avctx->flags&CODEC_FLAG_DR1)){
-        s->linesize[0]= stride;
-
-        s->picture[0]= av_mallocz(y_size);
-        if(c_size){
-            s->picture[1]= av_mallocz(c_size);
-            s->picture[2]= av_mallocz(c_size);
-            s->linesize[1]= s->linesize[2]= stride/2;
-
-            memset(s->picture[1], 128, c_size);
-            memset(s->picture[2], 128, c_size);
-        }
     }
 
 //    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
@@ -484,6 +460,10 @@ static int encode_init(AVCodecContext *avctx)
     avctx->stats_out= av_mallocz(1024*10);
     s->version=2;
 
+    avctx->coded_picture= &s->picture;
+    s->picture.pict_type= I_TYPE;
+    s->picture.key_frame= 1;
+
     switch(avctx->pix_fmt){
     case PIX_FMT_YUV420P:
         if(avctx->strict_std_compliance>=0){
@@ -674,12 +654,12 @@ static void draw_slice(HYuvContext *s, int y){
         cy= y;
     }
 
-    src_ptr[0] = s->picture[0] + s->linesize[0]*y;
-    src_ptr[1] = s->picture[1] + s->linesize[1]*cy;
-    src_ptr[2] = s->picture[2] + s->linesize[2]*cy;
+    src_ptr[0] = s->picture.data[0] + s->picture.linesize[0]*y;
+    src_ptr[1] = s->picture.data[1] + s->picture.linesize[1]*cy;
+    src_ptr[2] = s->picture.data[2] + s->picture.linesize[2]*cy;
     emms_c();
 
-    s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize[0], y, s->width, h);
+    s->avctx->draw_horiz_band(s->avctx, src_ptr, s->picture.linesize[0], y, s->width, h);
 
     s->last_slice_end= y + h;
 }
@@ -690,9 +670,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
     const int width2= s->width>>1;
     const int height= s->height;
     int fake_ystride, fake_ustride, fake_vstride;
-    int i;
+    AVVideoFrame * const p= &s->picture;
 
-    AVPicture *picture = data;
+    AVVideoFrame *picture = data;
 
     *data_size = 0;
@@ -704,22 +684,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
 
     init_get_bits(&s->gb, s->bitstream_buffer, buf_size);
 
-    if(avctx->flags&CODEC_FLAG_DR1){
-        if(avctx->get_buffer_callback(avctx, s->width, s->height, I_TYPE) < 0){
-            fprintf(stderr, "get_buffer() failed\n");
-            return -1;
-        }
-
-        s->linesize[0]= avctx->dr_stride;
-        s->linesize[1]=
-        s->linesize[2]= avctx->dr_uvstride;
-
-        for(i=0; i<3;i++)
-            s->picture[i]= avctx->dr_buffer[i];
+    p->reference= 0;
+    if(avctx->get_buffer(avctx, p) < 0){
+        fprintf(stderr, "get_buffer() failed\n");
+        return -1;
     }
 
-    fake_ystride= s->interlaced ? s->linesize[0]*2 : s->linesize[0];
-    fake_ustride= s->interlaced ? s->linesize[1]*2 : s->linesize[1];
-    fake_vstride= s->interlaced ? s->linesize[2]*2 : s->linesize[2];
+    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
+    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
+    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
 
     s->last_slice_end= 0;
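
Editorial note: the deleted CODEC_FLAG_DR1 path pulled buffers through get_buffer_callback() and the raw dr_stride/dr_buffer fields; the replacement asks avctx->get_buffer() to fill an AVVideoFrame directly. A condensed restatement of the pattern huffyuv now follows (same calls as in the hunks above and below, not new API):

    AVVideoFrame *p = &s->picture;

    p->reference = 0;                      /* no later frame predicts from this one */
    if (avctx->get_buffer(avctx, p) < 0) { /* allocator fills p->data[]/linesize[]  */
        fprintf(stderr, "get_buffer() failed\n");
        return -1;
    }
    /* ... decode rows into p->data[i] with stride p->linesize[i] ... */
    *picture = *p;                         /* struct copy hands the frame out       */
    avctx->release_buffer(avctx, p);       /* return the buffer to the allocator    */
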
@@ -729,28 +702,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
         int lefttopy, lefttopu, lefttopv;
 
         if(s->yuy2){
-            s->picture[0][3]= get_bits(&s->gb, 8);
-            s->picture[0][2]= get_bits(&s->gb, 8);
-            s->picture[0][1]= get_bits(&s->gb, 8);
-            s->picture[0][0]= get_bits(&s->gb, 8);
+            p->data[0][3]= get_bits(&s->gb, 8);
+            p->data[0][2]= get_bits(&s->gb, 8);
+            p->data[0][1]= get_bits(&s->gb, 8);
+            p->data[0][0]= get_bits(&s->gb, 8);
 
             fprintf(stderr, "YUY2 output isnt implemenetd yet\n");
             return -1;
         }else{
-            leftv= s->picture[2][0]= get_bits(&s->gb, 8);
-            lefty= s->picture[0][1]= get_bits(&s->gb, 8);
-            leftu= s->picture[1][0]= get_bits(&s->gb, 8);
-                   s->picture[0][0]= get_bits(&s->gb, 8);
+            leftv= p->data[2][0]= get_bits(&s->gb, 8);
+            lefty= p->data[0][1]= get_bits(&s->gb, 8);
+            leftu= p->data[1][0]= get_bits(&s->gb, 8);
+                   p->data[0][0]= get_bits(&s->gb, 8);
 
             switch(s->predictor){
             case LEFT:
             case PLANE:
                 decode_422_bitstream(s, width-2);
-                lefty= add_left_prediction(s->picture[0] + 2, s->temp[0], width-2, lefty);
+                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    leftu= add_left_prediction(s->picture[1] + 1, s->temp[1], width2-1, leftu);
-                    leftv= add_left_prediction(s->picture[2] + 1, s->temp[2], width2-1, leftv);
+                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
+                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                 }
 
                 for(cy=y=1; y<s->height; y++,cy++){
@@ -759,7 +732,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
 
                     if(s->bitstream_bpp==12){
                         decode_gray_bitstream(s, width);
-                        ydst= s->picture[0] + s->linesize[0]*y;
+                        ydst= p->data[0] + p->linesize[0]*y;
 
                         lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                         if(s->predictor == PLANE){
@@ -772,12 +745,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
                     draw_slice(s, y);
 
-                    ydst= s->picture[0] + s->linesize[0]*y;
-                    udst= s->picture[1] + s->linesize[1]*cy;
-                    vdst= s->picture[2] + s->linesize[2]*cy;
+                    ydst= p->data[0] + p->linesize[0]*y;
+                    udst= p->data[1] + p->linesize[1]*cy;
+                    vdst= p->data[2] + p->linesize[2]*cy;
 
                     decode_422_bitstream(s, width);
                     lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                     if(!(s->flags&CODEC_FLAG_GRAY)){
                         leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
@@ -799,10 +771,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
             case MEDIAN:
                 /* first line except first 2 pixels is left predicted */
                 decode_422_bitstream(s, width-2);
-                lefty= add_left_prediction(s->picture[0] + 2, s->temp[0], width-2, lefty);
+                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    leftu= add_left_prediction(s->picture[1] + 1, s->temp[1], width2-1, leftu);
-                    leftv= add_left_prediction(s->picture[2] + 1, s->temp[2], width2-1, leftv);
+                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
+                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                 }
 
                 cy=y=1;
@@ -810,31 +782,31 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
                 /* second line is left predicted for interlaced case */
                 if(s->interlaced){
                     decode_422_bitstream(s, width);
-                    lefty= add_left_prediction(s->picture[0] + s->linesize[0], s->temp[0], width, lefty);
+                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                     if(!(s->flags&CODEC_FLAG_GRAY)){
-                        leftu= add_left_prediction(s->picture[1] + s->linesize[2], s->temp[1], width2, leftu);
-                        leftv= add_left_prediction(s->picture[2] + s->linesize[1], s->temp[2], width2, leftv);
+                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
+                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                     }
                     y++; cy++;
                 }
 
                 /* next 4 pixels are left predicted too */
                 decode_422_bitstream(s, 4);
-                lefty= add_left_prediction(s->picture[0] + fake_ystride, s->temp[0], 4, lefty);
+                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    leftu= add_left_prediction(s->picture[1] + fake_ustride, s->temp[1], 2, leftu);
-                    leftv= add_left_prediction(s->picture[2] + fake_vstride, s->temp[2], 2, leftv);
+                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
+                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                 }
 
                 /* next line except the first 4 pixels is median predicted */
-                lefttopy= s->picture[0][3];
+                lefttopy= p->data[0][3];
                 decode_422_bitstream(s, width-4);
-                add_median_prediction(s->picture[0] + fake_ystride+4, s->picture[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
+                add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    lefttopu= s->picture[1][1];
-                    lefttopv= s->picture[2][1];
-                    add_median_prediction(s->picture[1] + fake_ustride+2, s->picture[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
-                    add_median_prediction(s->picture[2] + fake_vstride+2, s->picture[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
+                    lefttopu= p->data[1][1];
+                    lefttopv= p->data[2][1];
+                    add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
+                    add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                 }
                 y++; cy++;
@@ -844,7 +816,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
                     if(s->bitstream_bpp==12){
                         while(2*cy > y){
                             decode_gray_bitstream(s, width);
-                            ydst= s->picture[0] + s->linesize[0]*y;
+                            ydst= p->data[0] + p->linesize[0]*y;
                             add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                             y++;
                         }
@@ -854,9 +826,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
                     decode_422_bitstream(s, width);
 
-                    ydst= s->picture[0] + s->linesize[0]*y;
-                    udst= s->picture[1] + s->linesize[1]*cy;
-                    vdst= s->picture[2] + s->linesize[2]*cy;
+                    ydst= p->data[0] + p->linesize[0]*y;
+                    udst= p->data[1] + p->linesize[1]*cy;
+                    vdst= p->data[2] + p->linesize[2]*cy;
 
                     add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                     if(!(s->flags&CODEC_FLAG_GRAY)){
@@ -872,17 +844,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
     }else{
         int y;
         int leftr, leftg, leftb;
-        const int last_line= (height-1)*s->linesize[0];
+        const int last_line= (height-1)*p->linesize[0];
 
         if(s->bitstream_bpp==32){
-                   s->picture[0][last_line+3]= get_bits(&s->gb, 8);
-            leftr= s->picture[0][last_line+2]= get_bits(&s->gb, 8);
-            leftg= s->picture[0][last_line+1]= get_bits(&s->gb, 8);
-            leftb= s->picture[0][last_line+0]= get_bits(&s->gb, 8);
+                   p->data[0][last_line+3]= get_bits(&s->gb, 8);
+            leftr= p->data[0][last_line+2]= get_bits(&s->gb, 8);
+            leftg= p->data[0][last_line+1]= get_bits(&s->gb, 8);
+            leftb= p->data[0][last_line+0]= get_bits(&s->gb, 8);
        }else{
-            leftr= s->picture[0][last_line+2]= get_bits(&s->gb, 8);
-            leftg= s->picture[0][last_line+1]= get_bits(&s->gb, 8);
-            leftb= s->picture[0][last_line+0]= get_bits(&s->gb, 8);
+            leftr= p->data[0][last_line+2]= get_bits(&s->gb, 8);
+            leftg= p->data[0][last_line+1]= get_bits(&s->gb, 8);
+            leftb= p->data[0][last_line+0]= get_bits(&s->gb, 8);
             skip_bits(&s->gb, 8);
         }
@@ -891,16 +863,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
             case LEFT:
             case PLANE:
                 decode_bgr_bitstream(s, width-1);
-                add_left_prediction_bgr32(s->picture[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
+                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
 
                 for(y=s->height-2; y>=0; y--){ //yes its stored upside down
                     decode_bgr_bitstream(s, width);
-                    add_left_prediction_bgr32(s->picture[0] + s->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
+                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                     if(s->predictor == PLANE){
                         if((y&s->interlaced)==0){
-                            s->dsp.add_bytes(s->picture[0] + s->linesize[0]*y,
-                                             s->picture[0] + s->linesize[0]*y + fake_ystride, fake_ystride);
+                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
+                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                         }
                     }
                 }
@@ -917,12 +889,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
     }
     emms_c();
 
-    for(i=0;i<3;i++) {
-        picture->data[i] = s->picture[i];
-        picture->linesize[i]= s->linesize[i];
-    }
+    *picture= *p;
+    avctx->release_buffer(avctx, p);
 
-    *data_size = sizeof(AVPicture);
+    *data_size = sizeof(AVVideoFrame);
 
     return (get_bits_count(&s->gb)+7)>>3;
 }
@@ -933,44 +904,47 @@ static int decode_end(AVCodecContext *avctx)
     int i;
 
     for(i=0; i<3; i++){
-        if(!(avctx->flags&CODEC_FLAG_DR1))
-            av_freep(&s->picture[i]);
         free_vlc(&s->vlc[i]);
     }
 
+    if(avctx->get_buffer == avcodec_default_get_buffer){
+        for(i=0; i<4; i++){
+            av_freep(&s->picture.base[i]);
+            s->picture.data[i]= NULL;
+        }
+        av_freep(&s->picture.opaque);
+    }
+
     return 0;
 }
 
 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
     HYuvContext *s = avctx->priv_data;
-    AVPicture *pict = data;
+    AVVideoFrame *pict = data;
     const int width= s->width;
     const int width2= s->width>>1;
     const int height= s->height;
     const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
     const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
     const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
+    AVVideoFrame * const p= &s->picture;
     int i, size;
 
     init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
 
-    for(i=0; i<3; i++){
-        s->picture[i]= pict->data[i];
-        s->linesize[i]= pict->linesize[i];
-    }
+    *p = *pict;
 
     if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
         int lefty, leftu, leftv, y, cy;
 
-        put_bits(&s->pb, 8, leftv= s->picture[2][0]);
-        put_bits(&s->pb, 8, lefty= s->picture[0][1]);
-        put_bits(&s->pb, 8, leftu= s->picture[1][0]);
-        put_bits(&s->pb, 8,        s->picture[0][0]);
+        put_bits(&s->pb, 8, leftv= p->data[2][0]);
+        put_bits(&s->pb, 8, lefty= p->data[0][1]);
+        put_bits(&s->pb, 8, leftu= p->data[1][0]);
+        put_bits(&s->pb, 8,        p->data[0][0]);
 
-        lefty= sub_left_prediction(s, s->temp[0], s->picture[0]+2, width-2 , lefty);
-        leftu= sub_left_prediction(s, s->temp[1], s->picture[1]+1, width2-1, leftu);
-        leftv= sub_left_prediction(s, s->temp[2], s->picture[2]+1, width2-1, leftv);
+        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
+        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
+        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
 
         encode_422_bitstream(s, width-2);
@@ -978,26 +952,26 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
             int lefttopy, lefttopu, lefttopv;
             cy=y=1;
             if(s->interlaced){
-                lefty= sub_left_prediction(s, s->temp[0], s->picture[0]+s->linesize[0], width , lefty);
-                leftu= sub_left_prediction(s, s->temp[1], s->picture[1]+s->linesize[1], width2, leftu);
-                leftv= sub_left_prediction(s, s->temp[2], s->picture[2]+s->linesize[2], width2, leftv);
+                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
+                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
+                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
 
                 encode_422_bitstream(s, width);
                 y++; cy++;
             }
 
-            lefty= sub_left_prediction(s, s->temp[0], s->picture[0]+fake_ystride, 4, lefty);
-            leftu= sub_left_prediction(s, s->temp[1], s->picture[1]+fake_ystride, 2, leftu);
-            leftv= sub_left_prediction(s, s->temp[2], s->picture[2]+fake_ystride, 2, leftv);
+            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
+            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ystride, 2, leftu);
+            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_ystride, 2, leftv);
 
             encode_422_bitstream(s, 4);
 
-            lefttopy= s->picture[0][3];
-            lefttopu= s->picture[1][1];
-            lefttopv= s->picture[2][1];
-            sub_median_prediction(s->temp[0], s->picture[0]+4, s->picture[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
-            sub_median_prediction(s->temp[1], s->picture[1]+2, s->picture[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
-            sub_median_prediction(s->temp[2], s->picture[2]+2, s->picture[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
+            lefttopy= p->data[0][3];
+            lefttopu= p->data[1][1];
+            lefttopv= p->data[2][1];
+            sub_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
+            sub_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
+            sub_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
 
             encode_422_bitstream(s, width-4);
             y++; cy++;
@@ -1006,16 +980,16 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
                 if(s->bitstream_bpp==12){
                     while(2*cy > y){
-                        ydst= s->picture[0] + s->linesize[0]*y;
+                        ydst= p->data[0] + p->linesize[0]*y;
                         sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                         encode_gray_bitstream(s, width);
                         y++;
                     }
                     if(y>=height) break;
                 }
-                ydst= s->picture[0] + s->linesize[0]*y;
-                udst= s->picture[1] + s->linesize[1]*cy;
-                vdst= s->picture[2] + s->linesize[2]*cy;
+                ydst= p->data[0] + p->linesize[0]*y;
+                udst= p->data[1] + p->linesize[1]*cy;
+                vdst= p->data[2] + p->linesize[2]*cy;
 
                 sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                 sub_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
@@ -1029,7 +1003,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
                 /* encode a luma only line & y++ */
                 if(s->bitstream_bpp==12){
-                    ydst= s->picture[0] + s->linesize[0]*y;
+                    ydst= p->data[0] + p->linesize[0]*y;
 
                     if(s->predictor == PLANE && s->interlaced < y){
                         s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
@@ -1043,9 +1017,9 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
                     if(y>=height) break;
                 }
 
-                ydst= s->picture[0] + s->linesize[0]*y;
-                udst= s->picture[1] + s->linesize[1]*cy;
-                vdst= s->picture[2] + s->linesize[2]*cy;
+                ydst= p->data[0] + p->linesize[0]*y;
+                udst= p->data[1] + p->linesize[1]*cy;
+                vdst= p->data[2] + p->linesize[2]*cy;
 
                 if(s->predictor == PLANE && s->interlaced < cy){
                     s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
@@ -1088,11 +1062,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
         bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
     }
 
-    avctx->key_frame= 1;
-    avctx->pict_type= I_TYPE;
-
     s->picture_number++;
 
     return size*4;
 }
@@ -1180,9 +1180,11 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
            get_bits(&s->gb, 8), get_bits(&s->gb, 8));
         if (get_bits(&s->gb, 8) == 0)
         {
-            s->avctx->aspect_ratio_info = FF_ASPECT_EXTENDED;
-            s->avctx->aspected_width = get_bits(&s->gb, 16);
-            s->avctx->aspected_height = get_bits(&s->gb, 16);
+            int x_density = get_bits(&s->gb, 16);
+            int y_density = get_bits(&s->gb, 16);
+
+            //MN: needs to be checked
+            s->avctx->aspect_ratio= s->width*y_density/((float)s->height*x_density);
         }
         else
         {
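
Editorial note: when the JFIF APP0 units byte is 0, the two 16-bit density fields carry a pixel aspect ratio, not a display size, which is what the removed aspected_width/height reading got wrong. Whether the committed formula divides the densities the right way around is exactly what the "MN: needs to be checked" comment flags; with hypothetical PAL-like numbers:

    /* Hypothetical values, for illustration only. */
    int w = 720, h = 576;                /* coded frame size                 */
    int x_density = 16, y_density = 15;  /* JFIF Xdensity/Ydensity, units==0 */

    float a1 = w*y_density/((float)h*x_density); /* committed form: 1.171875     */
    float a2 = w*x_density/((float)h*y_density); /* inverted form:  1.333... = 4:3 */
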
@@ -1468,7 +1470,7 @@ eoi_parser:
                 }
                 /* dummy quality */
                 /* XXX: infer it with matrix */
-                avctx->quality = 3;
+//                avctx->quality = 3;
                 goto the_end;
             }
             break;
@@ -1635,7 +1637,7 @@ read_header:
     }
     /* dummy quality */
     /* XXX: infer it with matrix */
-    avctx->quality = 3;
+//    avctx->quality = 3;
 
     return buf_ptr - buf;
 }
@@ -92,7 +92,7 @@ static int full_motion_search(MpegEncContext * s,
     y2 = yy + range - 1;
     if (y2 > ymax)
         y2 = ymax;
-    pix = s->new_picture[0] + (yy * s->linesize) + xx;
+    pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
     dmin = 0x7fffffff;
     mx = 0;
     my = 0;
@@ -155,7 +155,7 @@ static int log_motion_search(MpegEncContext * s,
     if (y2 > ymax)
         y2 = ymax;
 
-    pix = s->new_picture[0] + (yy * s->linesize) + xx;
+    pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
     dmin = 0x7fffffff;
     mx = 0;
     my = 0;
@@ -231,7 +231,7 @@ static int phods_motion_search(MpegEncContext * s,
     if (y2 > ymax)
         y2 = ymax;
 
-    pix = s->new_picture[0] + (yy * s->linesize) + xx;
+    pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
     mx = 0;
     my = 0;
@@ -560,7 +560,7 @@ static int epzs_motion_search(MpegEncContext * s,
     uint16_t *score_map= s->me_score_map;
     int map_generation;
 
-    new_pic = s->new_picture[0] + pic_xy;
+    new_pic = s->new_picture.data[0] + pic_xy;
     old_pic = ref_picture + pic_xy;
 
     map_generation= update_map_generation(s);
@@ -649,7 +649,7 @@ static int epzs_motion_search4(MpegEncContext * s, int block,
     uint16_t *score_map= s->me_score_map;
     int map_generation;
 
-    new_pic = s->new_picture[0] + pic_xy;
+    new_pic = s->new_picture.data[0] + pic_xy;
     old_pic = ref_picture + pic_xy;
 
     map_generation= update_map_generation(s);
@@ -723,7 +723,7 @@ static inline int halfpel_motion_search(MpegEncContext * s,
     xx = 16 * s->mb_x + 8*(n&1);
     yy = 16 * s->mb_y + 8*(n>>1);
 
-    pix = s->new_picture[0] + (yy * s->linesize) + xx;
+    pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
 
     mx = *mx_ptr;
     my = *my_ptr;
@@ -789,7 +789,7 @@ static inline int fast_halfpel_motion_search(MpegEncContext * s,
     xx = 16 * s->mb_x + 8*(n&1);
     yy = 16 * s->mb_y + 8*(n>>1);
 
-    pix = s->new_picture[0] + (yy * s->linesize) + xx;
+    pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
 
     mx = *mx_ptr;
     my = *my_ptr;
@@ -931,7 +931,7 @@ static inline int mv4_search(MpegEncContext *s, int xmin, int ymin, int xmax, in
 {
     int block;
     int P[10][2];
-    uint8_t *ref_picture= s->last_picture[0];
+    uint8_t *ref_picture= s->last_picture.data[0];
     int dmin_sum=0;
 
     for(block=0; block<4; block++){
@@ -1019,7 +1019,8 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
     int P[10][2];
     const int shift= 1+s->quarter_sample;
     int mb_type=0;
-    uint8_t *ref_picture= s->last_picture[0];
+    uint8_t *ref_picture= s->last_picture.data[0];
+    Picture * const pic= &s->current_picture;
 
     get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, s->f_code);
     rel_xmin= xmin - mb_x*16;
@@ -1104,7 +1105,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
         xx = mb_x * 16;
         yy = mb_y * 16;
 
-        pix = s->new_picture[0] + (yy * s->linesize) + xx;
+        pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
         /* At this point (mx,my) are full-pell and the relative displacement */
         ppix = ref_picture + ((yy+my) * s->linesize) + (xx+mx);
@@ -1115,11 +1116,11 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
         vard = (s->dsp.pix_norm(pix, ppix, s->linesize)+128)>>8;
 
 //printf("%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
-        s->mb_var   [s->mb_width * mb_y + mb_x] = varc;
-        s->mc_mb_var[s->mb_width * mb_y + mb_x] = vard;
-        s->mb_mean  [s->mb_width * mb_y + mb_x] = (sum+128)>>8;
-        s->mb_var_sum    += varc;
-        s->mc_mb_var_sum += vard;
+        pic->mb_var   [s->mb_width * mb_y + mb_x] = varc;
+        pic->mc_mb_var[s->mb_width * mb_y + mb_x] = vard;
+        pic->mb_mean  [s->mb_width * mb_y + mb_x] = (sum+128)>>8;
+        pic->mb_var_sum    += varc;
+        pic->mc_mb_var_sum += vard;
 //printf("E%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
 
 #if 0
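
Editorial note: varc and vard move from MpegEncContext-wide arrays into the Picture being coded, so rate control reads statistics belonging to exactly this frame. As computed above, and assuming pix_norm() returns the 16x16 sum of squared differences (which its use here suggests), vard is the rounded, 1/256-scaled motion-compensation error:

    vard = ( sum over the 16x16 block of (pix - ppix)^2 + 128 ) >> 8
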
@@ -1318,7 +1319,7 @@ static inline int check_bidir_mv(MpegEncContext * s,
     if (src_y == s->height)
         dxy&= 1;
 
-    ptr = s->last_picture[0] + (src_y * s->linesize) + src_x;
+    ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x;
     s->dsp.put_pixels_tab[0][dxy](dest_y    , ptr    , s->linesize, 16);
 
     fbmin += (mv_penalty[motion_bx-pred_bx] + mv_penalty[motion_by-pred_by])*s->qscale;
@@ -1333,10 +1334,10 @@ static inline int check_bidir_mv(MpegEncContext * s,
     if (src_y == s->height)
         dxy&= 1;
 
-    ptr = s->next_picture[0] + (src_y * s->linesize) + src_x;
+    ptr = s->next_picture.data[0] + (src_y * s->linesize) + src_x;
     s->dsp.avg_pixels_tab[0][dxy](dest_y    , ptr    , s->linesize, 16);
 
-    fbmin += s->dsp.pix_abs16x16(s->new_picture[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize);
+    fbmin += s->dsp.pix_abs16x16(s->new_picture.data[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize);
 
     return fbmin;
 }
@@ -1418,7 +1419,7 @@ static inline int direct_search(MpegEncContext * s,
     src_y = clip(src_y, -16, height);
     if (src_y == height) dxy &= ~2;
 
-    ptr = s->last_picture[0] + (src_y * s->linesize) + src_x;
+    ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x;
     s->dsp.put_pixels_tab[0][dxy](dest_y    , ptr    , s->linesize, 16);
 
     dxy = ((motion_by & 1) << 1) | (motion_bx & 1);
@@ -1511,8 +1512,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
     dmin= direct_search(s, mb_x, mb_y);
 
-    fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, s->last_picture[0], s->f_code);
-    bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, s->next_picture[0], s->b_code) - quant;
+    fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, s->last_picture.data[0], s->f_code);
+    bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, s->next_picture.data[0], s->b_code) - quant;
 //printf(" %d %d ", s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]);
 
     fbmin= bidir_refine(s, mb_x, mb_y);
@@ -1534,8 +1535,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
         type= MB_TYPE_BIDIR;
     }
     score= ((unsigned)(score*score + 128*256))>>16;
-    s->mc_mb_var_sum += score;
-    s->mc_mb_var[mb_y*s->mb_width + mb_x] = score; //FIXME use SSD
+    s->current_picture.mc_mb_var_sum += score;
+    s->current_picture.mc_mb_var[mb_y*s->mb_width + mb_x] = score; //FIXME use SSD
     }
 
     if(s->flags&CODEC_FLAG_HQ){
@@ -1581,7 +1582,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
             int j;
             for(j=0; j<fcode && j<8; j++){
-                if(s->pict_type==B_TYPE || s->mc_mb_var[i] < s->mb_var[i])
+                if(s->pict_type==B_TYPE || s->current_picture.mc_mb_var[i] < s->current_picture.mb_var[i])
                     score[j]-= 170;
             }
         }
@@ -134,7 +134,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
         int n;
         UINT64 time_code;
 
-        if (s->picture_in_gop_number == 0) {
+        if (s->current_picture.key_frame) {
             /* mpeg1 header repeated every gop */
             put_header(s, SEQ_START_CODE);
@@ -1359,7 +1359,6 @@ static int mpeg_decode_init(AVCodecContext *avctx)
     s->mpeg_enc_ctx.picture_number = 0;
     s->repeat_field = 0;
     s->mpeg_enc_ctx.codec_id= avctx->codec->id;
-    avctx->mbskip_table= s->mpeg_enc_ctx.mbskip_table;
     return 0;
 }
@@ -1403,9 +1402,6 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
     s->pict_type = get_bits(&s->gb, 3);
     dprintf("pict_type=%d number=%d\n", s->pict_type, s->picture_number);
 
-    avctx->pict_type= s->pict_type;
-    avctx->key_frame= s->pict_type == I_TYPE;
-
     skip_bits(&s->gb, 16);
     if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
         s->full_pel[0] = get_bits1(&s->gb);
@@ -1423,6 +1419,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
         s->mpeg_f_code[1][0] = f_code;
         s->mpeg_f_code[1][1] = f_code;
     }
+    s->current_picture.pict_type= s->pict_type;
+    s->current_picture.key_frame= s->pict_type == I_TYPE;
     s->y_dc_scale = 8;
     s->c_dc_scale = 8;
     s->first_slice = 1;
@@ -1576,7 +1574,7 @@ static void mpeg_decode_extension(AVCodecContext *avctx,
  *             DECODE_SLICE_EOP if the end of the picture is reached
  */
 static int mpeg_decode_slice(AVCodecContext *avctx,
-                             AVPicture *pict,
+                             AVVideoFrame *pict,
                              int start_code,
                              UINT8 *buf, int buf_size)
 {
@@ -1677,38 +1675,25 @@ eos: //end of slice
     if (/*s->mb_x == 0 &&*/
         s->mb_y == s->mb_height) {
         /* end of image */
-        UINT8 **picture;
+
+        if(s->mpeg2)
+            s->qscale >>=1;
 
         MPV_frame_end(s);
 
         if (s->pict_type == B_TYPE) {
-            picture = s->current_picture;
-            avctx->quality = s->qscale;
+            *pict= *(AVVideoFrame*)&s->current_picture;
         } else {
-            s->picture_number++;
             /* latency of 1 frame for I and P frames */
             /* XXX: use another variable than picture_number */
-            if (s->picture_number == 0) {
-                picture = NULL;
+            if (s->picture_number == 1) {
+                return DECODE_SLICE_OK;
             } else {
-                picture = s->last_picture;
-                avctx->quality = s->last_qscale;
+                *pict= *(AVVideoFrame*)&s->last_picture;
             }
-            s->last_qscale = s->qscale;
+            s->picture_number++;
         }
-        if(s->mpeg2)
-            avctx->quality>>=1;
-        if (picture) {
-            pict->data[0] = picture[0];
-            pict->data[1] = picture[1];
-            pict->data[2] = picture[2];
-            pict->linesize[0] = s->linesize;
-            pict->linesize[1] = s->uvlinesize;
-            pict->linesize[2] = s->uvlinesize;
-            return DECODE_SLICE_EOP;
-        } else {
-            return DECODE_SLICE_OK;
-        }
+        return DECODE_SLICE_EOP;
     } else {
         return DECODE_SLICE_OK;
     }
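
Editorial note: the rewritten tail keeps the pre-existing one-reference-frame output latency, it just expresses it through whole-frame struct copies. On a hypothetical GOP (coded order on top), what reaches the caller is:

    /*
     * coded order  :  I1   P4   B2   B3   P7   B5   B6 ...
     * *pict set to :  --   I1   B2   B3   P4   B5   B6 ...
     *
     * B frames go out immediately (current_picture); I and P frames go out
     * when the next reference arrives (last_picture); the very first
     * reference (picture_number == 1) returns DECODE_SLICE_OK with no
     * picture, matching the "latency of 1 frame" comment above.
     */
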
@@ -1827,7 +1812,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
     Mpeg1Context *s = avctx->priv_data;
     UINT8 *buf_end, *buf_ptr, *buf_start;
     int len, start_code_found, ret, code, start_code, input_size;
-    AVPicture *picture = data;
+    AVVideoFrame *picture = data;
     MpegEncContext *s2 = &s->mpeg_enc_ctx;
 
     dprintf("fill_buffer\n");
@@ -1837,13 +1822,9 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
     /* special case for last picture */
     if (buf_size == 0) {
         if (s2->picture_number > 0) {
-            picture->data[0] = s2->next_picture[0];
-            picture->data[1] = s2->next_picture[1];
-            picture->data[2] = s2->next_picture[2];
-            picture->linesize[0] = s2->linesize;
-            picture->linesize[1] = s2->uvlinesize;
-            picture->linesize[2] = s2->uvlinesize;
-            *data_size = sizeof(AVPicture);
+            *picture= *(AVVideoFrame*)&s2->next_picture;
+            *data_size = sizeof(AVVideoFrame);
         }
         return 0;
     }
@@ -46,7 +46,6 @@ void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edg
 static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h,
                                     int src_x, int src_y, int w, int h);
 
-#define EDGE_WIDTH 16
 
 /* enable all paranoid tests for rounding, overflows, etc... */
 //#define PARANOID
@@ -268,10 +267,47 @@ int DCT_common_init(MpegEncContext *s)
     return 0;
 }
 
+/**
+ * allocates various arrays for a Picture structure, except the pixels themself.
+ * The pixels are allocated/set in te get_buffer()
+ */
+static int alloc_picture(MpegEncContext *s, Picture *pic){
+    if (s->encoding) {
+        CHECKED_ALLOCZ(pic->mb_var   , s->mb_num * sizeof(INT16))
+        CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16))
+        CHECKED_ALLOCZ(pic->mb_mean  , s->mb_num * sizeof(INT8))
+    }
+
+    CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check
+    CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8))
+    pic->qstride= s->mb_width;
+
+    return 0;
+fail: //for the CHECKED_ALLOCZ macro
+    return -1;
+}
+
+static void free_picture(MpegEncContext *s, Picture *pic){
+    int i;
+
+    av_freep(&pic->mb_var);
+    av_freep(&pic->mc_mb_var);
+    av_freep(&pic->mb_mean);
+    av_freep(&pic->mbskip_table);
+    av_freep(&pic->qscale_table);
+
+    if(s->avctx->get_buffer == avcodec_default_get_buffer){
+        for(i=0; i<4; i++){
+            av_freep(&pic->base[i]);
+            pic->data[i]= NULL;
+        }
+        av_freep(&pic->opaque);
+    }
+}
+
 /* init common structure for both encoder and decoder */
 int MPV_common_init(MpegEncContext *s)
 {
-    UINT8 *pict;
     int y_size, c_size, yc_size, i;
 
     dsputil_init(&s->dsp, s->avctx->dsp_mask);
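
Editorial note: alloc_picture() relies on the CHECKED_ALLOCZ convention used throughout mpegvideo.c: zero-allocate, and on failure jump to the function's local fail: label (hence the fail: label above). The macro itself is defined elsewhere in this file; a representative definition, paraphrased rather than quoted from this diff, is:

    /* Representative only; the real macro lives elsewhere in mpegvideo.c. */
    #define CHECKED_ALLOCZ(p, size)\
    {\
        p= av_mallocz(size);\
        if(p==NULL){\
            perror("malloc");\
            goto fail; /* the caller must provide a fail: label */\
        }\
    }
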
@@ -279,7 +315,7 @@ int MPV_common_init(MpegEncContext *s)
 
     s->flags= s->avctx->flags;
 
-    s->mb_width = (s->width + 15) / 16;
+    s->mb_width  = (s->width  + 15) / 16;
     s->mb_height = (s->height + 15) / 16;
 
     /* set default edge pos, will be overriden in decode_header if needed */
@@ -298,51 +334,12 @@ int MPV_common_init(MpegEncContext *s)
                    + (toupper((s->avctx->fourcc>>16)&0xFF)<<16)
                    + (toupper((s->avctx->fourcc>>24)&0xFF)<<24);
 
-    if(!(s->flags&CODEC_FLAG_DR1)){
-        s->linesize   = s->mb_width * 16 + 2 * EDGE_WIDTH;
-        s->uvlinesize = s->mb_width * 8  + EDGE_WIDTH;
-
-        for(i=0;i<3;i++) {
-            int w, h, shift, pict_start;
-            unsigned size;
-
-            w = s->linesize;
-            h = s->mb_height * 16 + 2 * EDGE_WIDTH;
-            shift = (i == 0) ? 0 : 1;
-            size = (s->linesize>>shift) * (h >> shift);
-            pict_start = (s->linesize>>shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);
-
-            CHECKED_ALLOCZ(pict, size)
-            s->last_picture_base[i] = pict;
-            s->last_picture[i] = pict + pict_start;
-            if(i>0) memset(s->last_picture_base[i], 128, size);
-
-            CHECKED_ALLOCZ(pict, size)
-            s->next_picture_base[i] = pict;
-            s->next_picture[i] = pict + pict_start;
-            if(i>0) memset(s->next_picture_base[i], 128, size);
-
-            if (s->has_b_frames || s->codec_id==CODEC_ID_MPEG4) {
-            /* Note the MPEG4 stuff is here cuz of buggy encoders which dont set the low_delay flag but
-               do low-delay encoding, so we cant allways distinguish b-frame containing streams from low_delay streams */
-                CHECKED_ALLOCZ(pict, size)
-                s->aux_picture_base[i] = pict;
-                s->aux_picture[i] = pict + pict_start;
-                if(i>0) memset(s->aux_picture_base[i], 128, size);
-            }
-        }
-        s->ip_buffer_count= 2;
-    }
-
     CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
 
+    s->avctx->coded_picture= (AVVideoFrame*)&s->current_picture;
+
     if (s->encoding) {
-        int j;
         int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
 
-        CHECKED_ALLOCZ(s->mb_var   , s->mb_num * sizeof(INT16))
-        CHECKED_ALLOCZ(s->mc_mb_var, s->mb_num * sizeof(INT16))
-        CHECKED_ALLOCZ(s->mb_mean  , s->mb_num * sizeof(INT8))
-
         /* Allocate MV tables */
         CHECKED_ALLOCZ(s->p_mv_table            , mv_table_size * 2 * sizeof(INT16))
@@ -354,28 +351,12 @@ int MPV_common_init(MpegEncContext *s)
         CHECKED_ALLOCZ(s->b_direct_back_mv_table, mv_table_size * 2 * sizeof(INT16))
         CHECKED_ALLOCZ(s->b_direct_mv_table     , mv_table_size * 2 * sizeof(INT16))
 
-        CHECKED_ALLOCZ(s->me_scratchpad,  s->linesize*16*3*sizeof(uint8_t))
+        //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
+        CHECKED_ALLOCZ(s->me_scratchpad,  s->width*2*16*3*sizeof(uint8_t))
 
         CHECKED_ALLOCZ(s->me_map      , ME_MAP_SIZE*sizeof(uint32_t))
         CHECKED_ALLOCZ(s->me_score_map, ME_MAP_SIZE*sizeof(uint16_t))
 
-        if(s->max_b_frames){
-            for(j=0; j<REORDER_BUFFER_SIZE; j++){
-                int i;
-                for(i=0;i<3;i++) {
-                    int w, h, shift, size;
-
-                    w = s->linesize;
-                    h = s->mb_height * 16;
-                    shift = (i == 0) ? 0 : 1;
-                    size = (w >> shift) * (h >> shift);
-
-                    CHECKED_ALLOCZ(pict, size);
-                    s->picture_buffer[j][i] = pict;
-                }
-            }
-        }
-
         if(s->codec_id==CODEC_ID_MPEG4){
             CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
             CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);
@@ -434,12 +415,6 @@ int MPV_common_init(MpegEncContext *s)
         s->dc_val[0][i] = 1024;
     }
 
-    CHECKED_ALLOCZ(s->next_qscale_table  , s->mb_num * sizeof(UINT8))
-    CHECKED_ALLOCZ(s->last_qscale_table  , s->mb_num * sizeof(UINT8))
-    CHECKED_ALLOCZ(s->aux_qscale_table   , s->mb_num * sizeof(UINT8))
-    s->qscale_table= s->next_qscale_table;
-    s->avctx->qstride= s->mb_width;
-
     /* which mb is a intra block */
     CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
     memset(s->mbintra_table, 1, s->mb_num);
@@ -470,10 +445,13 @@ void MPV_common_end(MpegEncContext *s)
 {
     int i;
 
+    for(i=0; i<MAX_PICTURE_COUNT; i++){
+        if(s->picture[i].data[0]){
+            s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]);
+        }
+    }
+
     av_freep(&s->mb_type);
-    av_freep(&s->mb_var);
-    av_freep(&s->mc_mb_var);
-    av_freep(&s->mb_mean);
     av_freep(&s->p_mv_table);
     av_freep(&s->b_forw_mv_table);
     av_freep(&s->b_back_mv_table);
@@ -489,9 +467,6 @@ void MPV_common_end(MpegEncContext *s)
     av_freep(&s->mbintra_table);
     av_freep(&s->cbp_table);
     av_freep(&s->pred_dir_table);
-    av_freep(&s->next_qscale_table);
-    av_freep(&s->last_qscale_table);
-    av_freep(&s->aux_qscale_table);
     av_freep(&s->me_scratchpad);
     av_freep(&s->me_map);
     av_freep(&s->me_score_map);
@@ -507,24 +482,9 @@ void MPV_common_end(MpegEncContext *s)
     av_freep(&s->avctx->stats_out);
     av_freep(&s->ac_stats);
     av_freep(&s->error_status_table);
-
-    for(i=0;i<3;i++) {
-        int j;
-        if(!(s->flags&CODEC_FLAG_DR1)){
-            av_freep(&s->last_picture_base[i]);
-            av_freep(&s->next_picture_base[i]);
-            av_freep(&s->aux_picture_base[i]);
-        }
-        s->last_picture_base[i]=
-        s->next_picture_base[i]=
-        s->aux_picture_base [i] = NULL;
-        s->last_picture[i]=
-        s->next_picture[i]=
-        s->aux_picture [i] = NULL;
-        for(j=0; j<REORDER_BUFFER_SIZE; j++){
-            av_freep(&s->picture_buffer[j][i]);
-        }
+
+    for(i=0; i<MAX_PICTURE_COUNT; i++){
+        free_picture(s, &s->picture[i]);
     }
     s->context_initialized = 0;
 }
@@ -813,70 +773,70 @@ static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w) | |||||
/* generic function for encode/decode called before a frame is coded/decoded */ | /* generic function for encode/decode called before a frame is coded/decoded */ | ||||
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) | int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) | ||||
{ | { | ||||
int i; | |||||
UINT8 *tmp; | |||||
int i, r; | |||||
AVVideoFrame *pic; | |||||
s->mb_skiped = 0; | s->mb_skiped = 0; | ||||
avctx->mbskip_table= s->mbskip_table; | |||||
s->hurry_up= s->avctx->hurry_up; | |||||
s->error_resilience= avctx->error_resilience; | |||||
if(avctx->flags&CODEC_FLAG_DR1){ | |||||
if(avctx->get_buffer_callback(avctx, s->width, s->height, s->pict_type) < 0){ | |||||
fprintf(stderr, "get_buffer() failed\n"); | |||||
return -1; | |||||
/* mark&release old frames */ | |||||
if (s->pict_type != B_TYPE && s->last_picture.data[0]) { | |||||
Picture *pic= NULL; | |||||
for(i=0; i<MAX_PICTURE_COUNT; i++){ | |||||
if(s->picture[i].data[0] == s->last_picture.data[0]){ | |||||
// s->picture[i].reference=0; | |||||
avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]); | |||||
break; | |||||
} | |||||
} | |||||
assert(i<MAX_PICTURE_COUNT); | |||||
/* release forgotten pictures */ | |||||
/* if(mpeg124/h263) */ | |||||
if(!s->encoding){ | |||||
for(i=0; i<MAX_PICTURE_COUNT; i++){ | |||||
if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){ | |||||
fprintf(stderr, "releasing zombie picture\n"); | |||||
avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]); | |||||
} | |||||
} | |||||
} | } | ||||
s->linesize = avctx->dr_stride; | |||||
s->uvlinesize= avctx->dr_uvstride; | |||||
s->ip_buffer_count= avctx->dr_ip_buffer_count; | |||||
} | } | ||||
avctx->dr_ip_buffer_count= s->ip_buffer_count; | |||||
if (s->pict_type == B_TYPE) { | |||||
for(i=0;i<3;i++) { | |||||
if(avctx->flags&CODEC_FLAG_DR1) | |||||
s->aux_picture[i]= avctx->dr_buffer[i]; | |||||
//FIXME the following should never be needed, the decoder should drop b frames if no reference is available | |||||
if(s->next_picture[i]==NULL) | |||||
s->next_picture[i]= s->aux_picture[i]; | |||||
if(s->last_picture[i]==NULL) | |||||
s->last_picture[i]= s->next_picture[i]; | |||||
s->current_picture[i] = s->aux_picture[i]; | |||||
if(!s->encoding){ | |||||
/* find unused Picture */ | |||||
for(i=0; i<MAX_PICTURE_COUNT; i++){ | |||||
if(s->picture[i].data[0]==NULL) break; | |||||
} | |||||
assert(i<MAX_PICTURE_COUNT); | |||||
pic= (AVVideoFrame*)&s->picture[i]; | |||||
pic->reference= s->pict_type != B_TYPE; | |||||
pic->coded_picture_number= s->current_picture.coded_picture_number+1; | |||||
r= avctx->get_buffer(avctx, pic); | |||||
if(r<0 || (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1]))){ | |||||
fprintf(stderr, "get_buffer() failed (stride changed), bye bye\n"); | |||||
return -1; | |||||
} | } | ||||
s->avctx->display_qscale_table= | |||||
s->avctx->current_qscale_table= | |||||
s->qscale_table= s->aux_qscale_table; | |||||
} else { | |||||
for(i=0;i<3;i++) { | |||||
/* swap next and last */ | |||||
if(avctx->flags&CODEC_FLAG_DR1) | |||||
tmp= avctx->dr_buffer[i]; | |||||
else | |||||
tmp = s->last_picture[i]; | |||||
s->last_picture[i] = s->next_picture[i]; | |||||
s->next_picture[i] = tmp; | |||||
s->current_picture[i] = tmp; | |||||
s->linesize = pic->linesize[0]; | |||||
s->uvlinesize= pic->linesize[1]; | |||||
if(pic->qscale_table==NULL) | |||||
alloc_picture(s, (Picture*)pic); | |||||
if(s->last_picture[i]==NULL) | |||||
s->last_picture[i]= s->next_picture[i]; | |||||
s->current_picture= s->picture[i]; | |||||
} | |||||
s->last_dr_opaque= s->next_dr_opaque; | |||||
s->next_dr_opaque= avctx->dr_opaque_frame; | |||||
s->hurry_up= s->avctx->hurry_up; | |||||
s->error_resilience= avctx->error_resilience; | |||||
if(s->has_b_frames && s->last_dr_opaque && s->codec_id!=CODEC_ID_SVQ1) | |||||
avctx->dr_opaque_frame= s->last_dr_opaque; | |||||
else | |||||
avctx->dr_opaque_frame= s->next_dr_opaque; | |||||
} | |||||
s->avctx->current_qscale_table= s->qscale_table = s->last_qscale_table; | |||||
s->avctx->display_qscale_table= s->last_qscale_table = s->next_qscale_table; | |||||
s->next_qscale_table= s->qscale_table; | |||||
if (s->pict_type != B_TYPE) { | |||||
s->last_picture= s->next_picture; | |||||
s->next_picture= s->current_picture; | |||||
} | } | ||||
/* set dequantizer, we can't do it during init as it might change for mpeg4 | /* set dequantizer, we can't do it during init as it might change for mpeg4 | ||||
and we can't do it in the header decode as init isn't called for mpeg4 there yet */ | and we can't do it in the header decode as init isn't called for mpeg4 there yet */ | ||||
if(s->out_format == FMT_H263){ | if(s->out_format == FMT_H263){ | ||||
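The hunk above ends just after the new reference rotation: for non-B frames, last_picture takes the old next_picture and next_picture takes current_picture, both as whole-struct copies instead of the old per-plane pointer swap, while B-frames predict from both without rotating. A minimal sketch of the scheme, with a hypothetical Frame type standing in for the real Picture:

    typedef struct Frame { unsigned char *data[3]; int reference; } Frame;

    /* run once per coded frame, after a buffer for 'cur' has been obtained */
    static void rotate_references(Frame *last, Frame *next, const Frame *cur,
                                  int is_b_frame)
    {
        if (!is_b_frame) {
            *last = *next;   /* old future reference becomes the past reference */
            *next = *cur;    /* the frame being coded is the new future reference */
        }
        /* B-frames read *last and *next but are never referenced themselves */
    }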
@@ -893,14 +853,15 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) | |||||
/* generic function for encode/decode called after a frame has been coded/decoded */ | /* generic function for encode/decode called after a frame has been coded/decoded */ | ||||
void MPV_frame_end(MpegEncContext *s) | void MPV_frame_end(MpegEncContext *s) | ||||
{ | { | ||||
s->avctx->key_frame = (s->pict_type == I_TYPE); | |||||
s->avctx->pict_type = s->pict_type; | |||||
int i; | |||||
/* draw edge for correct motion prediction if outside */ | /* draw edge for correct motion prediction if outside */ | ||||
if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { | |||||
draw_edges(s->current_picture[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH ); | |||||
draw_edges(s->current_picture[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); | |||||
draw_edges(s->current_picture[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); | |||||
if(s->codec_id!=CODEC_ID_SVQ1){ | |||||
if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { | |||||
draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH ); | |||||
draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); | |||||
draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); | |||||
} | |||||
} | } | ||||
emms_c(); | emms_c(); | ||||
@@ -910,84 +871,154 @@ void MPV_frame_end(MpegEncContext *s) | |||||
s->num_available_buffers++; | s->num_available_buffers++; | ||||
if(s->num_available_buffers>2) s->num_available_buffers= 2; | if(s->num_available_buffers>2) s->num_available_buffers= 2; | ||||
} | } | ||||
s->current_picture.quality= s->qscale; //FIXME get average of qscale_table | |||||
s->current_picture.pict_type= s->pict_type; | |||||
s->current_picture.key_frame= s->pict_type == I_TYPE; | |||||
/* copy back current_picture variables */ | |||||
for(i=0; i<MAX_PICTURE_COUNT; i++){ | |||||
if(s->picture[i].data[0] == s->current_picture.data[0]){ | |||||
s->picture[i]= s->current_picture; | |||||
break; | |||||
} | |||||
} | |||||
assert(i<MAX_PICTURE_COUNT); | |||||
/* release non-reference frames */ | |||||
for(i=0; i<MAX_PICTURE_COUNT; i++){ | |||||
if(s->picture[i].data[0] && !s->picture[i].reference) | |||||
s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]); | |||||
} | |||||
} | } | ||||
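The two loops closing MPV_frame_end above define the ownership rule of the new picture pool: metadata set on current_picture is synced back into the matching picture[] slot, then every slot that still holds data but is not flagged as a reference is returned through release_buffer(). Condensed into one helper, a sketch over only the fields used above:

    /* per-frame epilogue of the pool design: 'pool' is picture[], 'cur' the
       frame just finished, n is MAX_PICTURE_COUNT */
    static void end_frame(MpegEncContext *s, Picture *pool, int n, const Picture *cur)
    {
        int i;
        for (i = 0; i < n; i++) {
            if (pool[i].data[0] == cur->data[0]) {
                pool[i] = *cur;            /* copy back quality, pict_type, ... */
                break;
            }
        }
        for (i = 0; i < n; i++) {
            if (pool[i].data[0] && !pool[i].reference)
                s->avctx->release_buffer(s->avctx, (AVVideoFrame *)&pool[i]);
        }
    }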
/* reorder input for encoding */ | |||||
void reorder_input(MpegEncContext *s, AVPicture *pict) | |||||
{ | |||||
int i, j, index; | |||||
if(s->max_b_frames > FF_MAX_B_FRAMES) s->max_b_frames= FF_MAX_B_FRAMES; | |||||
// delay= s->max_b_frames+1; (or 0 if no b frames cuz decoder diff) | |||||
for(j=0; j<REORDER_BUFFER_SIZE-1; j++){ | |||||
s->coded_order[j]= s->coded_order[j+1]; | |||||
} | |||||
s->coded_order[j].picture[0]= s->coded_order[j].picture[1]= s->coded_order[j].picture[2]= NULL; //catch uninitalized buffers | |||||
s->coded_order[j].pict_type=0; | |||||
switch(s->input_pict_type){ | |||||
default: | |||||
case I_TYPE: | |||||
case S_TYPE: | |||||
case P_TYPE: | |||||
index= s->max_b_frames - s->b_frames_since_non_b; | |||||
s->b_frames_since_non_b=0; | |||||
break; | |||||
case B_TYPE: | |||||
index= s->max_b_frames + 1; | |||||
s->b_frames_since_non_b++; | |||||
break; | |||||
} | |||||
//printf("index:%d type:%d strides: %d %d\n", index, s->input_pict_type, pict->linesize[0], s->linesize); | |||||
if( (index==0 || (s->flags&CODEC_FLAG_INPUT_PRESERVED)) | |||||
&& pict->linesize[0] == s->linesize | |||||
&& pict->linesize[1] == s->uvlinesize | |||||
&& pict->linesize[2] == s->uvlinesize){ | |||||
//printf("ptr\n"); | |||||
for(i=0; i<3; i++){ | |||||
s->coded_order[index].picture[i]= pict->data[i]; | |||||
} | |||||
static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){ | |||||
AVVideoFrame *pic; | |||||
int i,r; | |||||
const int encoding_delay= s->max_b_frames; | |||||
/* find unused Picture */ | |||||
for(i=0; i<MAX_PICTURE_COUNT; i++){ | |||||
if(s->picture[i].data[0]==NULL) break; | |||||
} | |||||
assert(i<MAX_PICTURE_COUNT); | |||||
pic= (AVVideoFrame*)&s->picture[i]; | |||||
pic->reference= 1; | |||||
// assert(avctx->get_buffer == default_get_buffer || avctx->get_buffer==NULL); | |||||
r= s->avctx->get_buffer(s->avctx, pic); | |||||
if(r<0 || (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1]))){ | |||||
fprintf(stderr, "get_buffer() failed (stride changed), bye bye\n"); | |||||
return -1; | |||||
} | |||||
assert(s->linesize==0 || s->linesize ==pic->linesize[0]); | |||||
assert(s->uvlinesize==0 || s->uvlinesize==pic->linesize[1]); | |||||
assert(pic->linesize[1] == pic->linesize[2]); | |||||
s->linesize = pic->linesize[0]; | |||||
s->uvlinesize= pic->linesize[1]; | |||||
if(pic->qscale_table==NULL) | |||||
alloc_picture(s, (Picture*)pic); | |||||
// assert(s->input_picture[0]==NULL || s->input_picture[0]->data[0]==NULL); | |||||
if(s->input_picture[encoding_delay]) | |||||
pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1; | |||||
//printf("dpn2:%d\n", pic->display_picture_number); | |||||
/* shift buffer entries */ | |||||
for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++) | |||||
s->input_picture[i-1]= s->input_picture[i]; | |||||
s->input_picture[encoding_delay]= (Picture*)pic; | |||||
pic->pict_type= pic_arg->pict_type; | |||||
pic->quality= pic_arg->quality; | |||||
if( pic->data[0] == pic_arg->data[0] | |||||
&& pic->data[1] == pic_arg->data[1] | |||||
&& pic->data[2] == pic_arg->data[2]){ | |||||
// empty | |||||
}else{ | }else{ | ||||
//printf("copy\n"); | |||||
int h_chroma_shift, v_chroma_shift; | |||||
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); | |||||
for(i=0; i<3; i++){ | for(i=0; i<3; i++){ | ||||
uint8_t *src = pict->data[i]; | |||||
uint8_t *dest; | |||||
int src_wrap = pict->linesize[i]; | |||||
int dest_wrap = s->linesize; | |||||
int w = s->width; | |||||
int h = s->height; | |||||
if(index==0) dest= s->last_picture[i]+16; //is current_picture indeed but the switch hapens after reordering | |||||
else dest= s->picture_buffer[s->picture_buffer_index][i]; | |||||
if (i >= 1) { | |||||
dest_wrap >>= 1; | |||||
w >>= 1; | |||||
h >>= 1; | |||||
int src_stride= pic_arg->linesize[i]; | |||||
int dst_stride= i ? s->uvlinesize : s->linesize; | |||||
int h_shift= i ? h_chroma_shift : 0; | |||||
int v_shift= i ? v_chroma_shift : 0; | |||||
int w= s->width >>h_shift; | |||||
int h= s->height>>v_shift; | |||||
uint8_t *src= pic_arg->data[i]; | |||||
uint8_t *dst= pic->data[i] + 16; | |||||
if(src_stride==dst_stride) | |||||
memcpy(dst, src, src_stride*h); | |||||
else{ | |||||
while(h--){ | |||||
memcpy(dst, src, w); | |||||
dst += dst_stride; | |||||
src += src_stride; | |||||
} | |||||
} | } | ||||
} | |||||
} | |||||
s->coded_order[index].picture[i]= dest; | |||||
for(j=0;j<h;j++) { | |||||
memcpy(dest, src, w); | |||||
dest += dest_wrap; | |||||
src += src_wrap; | |||||
return 0; | |||||
} | |||||
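The input copy in load_input_picture() falls back to a per-row loop only when source and destination strides disagree; otherwise one memcpy moves the whole padded plane. The same idiom in isolation, with plain parameters instead of the context and frame structs:

    #include <stdint.h>
    #include <string.h>

    /* copy a w x h plane of 8-bit samples; strides are in bytes and may differ */
    static void copy_plane(uint8_t *dst, int dst_stride,
                           const uint8_t *src, int src_stride, int w, int h)
    {
        if (src_stride == dst_stride) {
            memcpy(dst, src, src_stride * h);  /* identical layout: one shot */
        } else {
            while (h--) {
                memcpy(dst, src, w);           /* otherwise row by row */
                dst += dst_stride;
                src += src_stride;
            }
        }
    }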
static void select_input_picture(MpegEncContext *s){ | |||||
int i; | |||||
const int encoding_delay= s->max_b_frames; | |||||
int coded_pic_num=0; | |||||
if(s->reordered_input_picture[0]) | |||||
coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1; | |||||
//printf("cpn:%d\n", coded_pic_num); | |||||
for(i=1; i<MAX_PICTURE_COUNT; i++) | |||||
s->reordered_input_picture[i-1]= s->reordered_input_picture[i]; | |||||
s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL; | |||||
/* set next picture types & ordering */ | |||||
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ | |||||
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){ | |||||
s->reordered_input_picture[0]= s->input_picture[0]; | |||||
s->reordered_input_picture[0]->pict_type= I_TYPE; | |||||
s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; | |||||
}else{ | |||||
s->reordered_input_picture[0]= s->input_picture[s->max_b_frames]; | |||||
if(s->picture_in_gop_number + s->max_b_frames >= s->gop_size) | |||||
s->reordered_input_picture[0]->pict_type= I_TYPE; | |||||
else | |||||
s->reordered_input_picture[0]->pict_type= P_TYPE; | |||||
s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; | |||||
for(i=0; i<s->max_b_frames; i++){ | |||||
coded_pic_num++; | |||||
s->reordered_input_picture[i+1]= s->input_picture[i]; | |||||
s->reordered_input_picture[i+1]->pict_type= B_TYPE; | |||||
s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num; | |||||
} | } | ||||
} | } | ||||
if(index!=0){ | |||||
s->picture_buffer_index++; | |||||
if(s->picture_buffer_index >= REORDER_BUFFER_SIZE) s->picture_buffer_index=0; | |||||
} | |||||
} | } | ||||
s->coded_order[index].pict_type = s->input_pict_type; | |||||
s->coded_order[index].qscale = s->input_qscale; | |||||
s->coded_order[index].force_type= s->force_input_type; | |||||
s->coded_order[index].picture_in_gop_number= s->input_picture_in_gop_number; | |||||
s->coded_order[index].picture_number= s->input_picture_number; | |||||
for(i=0; i<3; i++){ | |||||
s->new_picture[i]= s->coded_order[0].picture[i]; | |||||
if(s->reordered_input_picture[0]){ | |||||
if(s->reordered_input_picture[0]->pict_type==B_TYPE){ | |||||
s->reordered_input_picture[0]->reference=0; | |||||
} | |||||
s->current_picture= *s->reordered_input_picture[0]; | |||||
s->new_picture= s->current_picture; | |||||
s->new_picture.data[0]+=16; | |||||
s->new_picture.data[1]+=16; | |||||
s->new_picture.data[2]+=16; | |||||
s->picture_number= s->new_picture.display_picture_number; | |||||
//printf("dpn:%d\n", s->picture_number); | |||||
}else{ | |||||
memset(&s->new_picture, 0, sizeof(Picture)); | |||||
} | } | ||||
} | } | ||||
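select_input_picture() is what turns display order into coded order: once max_b_frames+1 inputs are buffered and a reference exists, the frame max_b_frames ahead goes out first as I/P and the buffered frames follow it as B-frames. For max_b_frames=2 and display order 0 1 2 3 4 5 6 this yields roughly 0(I) 3(P) 1(B) 2(B) 6(P) 4(B) 5(B). A toy model of one selection step, ignoring GOP boundaries and the real Picture bookkeeping:

    /* disp[] holds buffered frame numbers in display order; returns how many
       entries were written to coded[] (0 = keep buffering). max_b = 2 here. */
    static int select_step(const int *disp, int n_buffered, int have_ref, int *coded)
    {
        int i, n = 0;
        if (!have_ref) {
            coded[n++] = disp[0];          /* first frame: intra, no delay */
        } else if (n_buffered >= 3) {      /* max_b + 1 inputs available */
            coded[n++] = disp[2];          /* the future I/P goes out first */
            for (i = 0; i < 2; i++)
                coded[n++] = disp[i];      /* then the B-frames that use it */
        }
        return n;
    }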
@@ -995,52 +1026,26 @@ int MPV_encode_picture(AVCodecContext *avctx, | |||||
unsigned char *buf, int buf_size, void *data) | unsigned char *buf, int buf_size, void *data) | ||||
{ | { | ||||
MpegEncContext *s = avctx->priv_data; | MpegEncContext *s = avctx->priv_data; | ||||
AVPicture *pict = data; | |||||
s->input_qscale = avctx->quality; | |||||
AVVideoFrame *pic_arg = data; | |||||
init_put_bits(&s->pb, buf, buf_size, NULL, NULL); | init_put_bits(&s->pb, buf, buf_size, NULL, NULL); | ||||
if(avctx->force_type){ | |||||
s->input_pict_type= | |||||
s->force_input_type= avctx->force_type; | |||||
}else if(s->flags&CODEC_FLAG_PASS2){ | |||||
s->input_pict_type= | |||||
s->force_input_type= s->rc_context.entry[s->input_picture_number].new_pict_type; | |||||
}else{ | |||||
s->force_input_type=0; | |||||
if (!s->intra_only) { | |||||
/* first picture of GOP is intra */ | |||||
if (s->input_picture_in_gop_number % s->gop_size==0){ | |||||
s->input_pict_type = I_TYPE; | |||||
}else if(s->max_b_frames==0){ | |||||
s->input_pict_type = P_TYPE; | |||||
}else{ | |||||
if(s->b_frames_since_non_b < s->max_b_frames) //FIXME more IQ | |||||
s->input_pict_type = B_TYPE; | |||||
else | |||||
s->input_pict_type = P_TYPE; | |||||
} | |||||
} else { | |||||
s->input_pict_type = I_TYPE; | |||||
} | |||||
} | |||||
s->picture_in_gop_number++; | |||||
if(s->input_pict_type==I_TYPE) | |||||
s->input_picture_in_gop_number=0; | |||||
load_input_picture(s, pic_arg); | |||||
reorder_input(s, pict); | |||||
select_input_picture(s); | |||||
/* output? */ | /* output? */ | ||||
if(s->coded_order[0].picture[0]){ | |||||
s->pict_type= s->coded_order[0].pict_type; | |||||
if (s->fixed_qscale) /* the ratecontrol needs the last qscale so we dont touch it for CBR */ | |||||
s->qscale= s->coded_order[0].qscale; | |||||
s->force_type= s->coded_order[0].force_type; | |||||
s->picture_in_gop_number= s->coded_order[0].picture_in_gop_number; | |||||
s->picture_number= s->coded_order[0].picture_number; | |||||
if(s->new_picture.data[0]){ | |||||
s->pict_type= s->new_picture.pict_type; | |||||
if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we don't touch it for CBR */ | |||||
s->qscale= (int)(s->new_picture.quality+0.5); | |||||
assert(s->qscale); | |||||
} | |||||
//emms_c(); | |||||
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale); | |||||
MPV_frame_start(s, avctx); | MPV_frame_start(s, avctx); | ||||
encode_picture(s, s->picture_number); | encode_picture(s, s->picture_number); | ||||
@@ -1059,17 +1064,12 @@ int MPV_encode_picture(AVCodecContext *avctx, | |||||
if (s->out_format == FMT_MJPEG) | if (s->out_format == FMT_MJPEG) | ||||
mjpeg_picture_trailer(s); | mjpeg_picture_trailer(s); | ||||
if(!s->fixed_qscale) | |||||
avctx->quality = s->qscale; | |||||
if(s->flags&CODEC_FLAG_PASS1) | if(s->flags&CODEC_FLAG_PASS1) | ||||
ff_write_pass1_stats(s); | ff_write_pass1_stats(s); | ||||
} | } | ||||
s->input_picture_number++; | s->input_picture_number++; | ||||
s->input_picture_in_gop_number++; | |||||
flush_put_bits(&s->pb); | flush_put_bits(&s->pb); | ||||
s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; | s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; | ||||
@@ -1088,14 +1088,16 @@ if(s->max_b_frames==0) | |||||
fprintf(f, "%7d, %7d, %2.4f\n", pbBufPtr(&s->pb) - s->pb.buf, s->qscale, avctx->psnr_y); | fprintf(f, "%7d, %7d, %2.4f\n", pbBufPtr(&s->pb) - s->pb.buf, s->qscale, avctx->psnr_y); | ||||
} | } | ||||
#endif | #endif | ||||
#if 0 | |||||
if (avctx->get_psnr) { | if (avctx->get_psnr) { | ||||
/* At this point pict->data should have the original frame */ | /* At this point pict->data should have the original frame */ | ||||
/* and s->current_picture should have the coded/decoded frame */ | /* and s->current_picture should have the coded/decoded frame */ | ||||
get_psnr(pict->data, s->current_picture, | |||||
get_psnr(pict->data, s->current_picture.data, | |||||
pict->linesize, s->linesize, avctx); | pict->linesize, s->linesize, avctx); | ||||
// printf("%f\n", avctx->psnr_y); | // printf("%f\n", avctx->psnr_y); | ||||
} | } | ||||
#endif | |||||
return pbBufPtr(&s->pb) - s->pb.buf; | return pbBufPtr(&s->pb) - s->pb.buf; | ||||
} | } | ||||
@@ -1757,7 +1759,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) | |||||
mb_x = s->mb_x; | mb_x = s->mb_x; | ||||
mb_y = s->mb_y; | mb_y = s->mb_y; | ||||
s->qscale_table[mb_xy]= s->qscale; | |||||
s->current_picture.qscale_table[mb_xy]= s->qscale; | |||||
/* update DC predictors for P macroblocks */ | /* update DC predictors for P macroblocks */ | ||||
if (!s->mb_intra) { | if (!s->mb_intra) { | ||||
@@ -1823,33 +1825,47 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) | |||||
op_pixels_func (*op_pix)[4]; | op_pixels_func (*op_pix)[4]; | ||||
qpel_mc_func (*op_qpix)[16]; | qpel_mc_func (*op_qpix)[16]; | ||||
/* avoid copy if macroblock skipped in last frame too | |||||
dont touch it for B-frames as they need the skip info from the next p-frame */ | |||||
/* avoid copy if macroblock skipped in last frame too */ | |||||
if (s->pict_type != B_TYPE) { | if (s->pict_type != B_TYPE) { | ||||
s->current_picture.mbskip_table[mb_xy]= s->mb_skiped; | |||||
} | |||||
/* skip only during decoding, as during encoding we might trash the buffers a bit */ | |||||
if(!s->encoding){ | |||||
UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy]; | UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy]; | ||||
if (s->mb_skiped) { | |||||
s->mb_skiped = 0; | |||||
const int age= s->current_picture.age; | |||||
assert(age); | |||||
if (s->mb_skiped) { | |||||
s->mb_skiped= 0; | |||||
assert(s->pict_type!=I_TYPE); | |||||
(*mbskip_ptr) ++; /* indicate that this time we skipped it */ | (*mbskip_ptr) ++; /* indicate that this time we skipped it */ | ||||
if(*mbskip_ptr >99) *mbskip_ptr= 99; | if(*mbskip_ptr >99) *mbskip_ptr= 99; | ||||
/* if previous was skipped too, then nothing to do ! | |||||
skip only during decoding as we might trash the buffers during encoding a bit */ | |||||
if (*mbskip_ptr >= s->ip_buffer_count && !s->encoding) | |||||
return; | |||||
/* if previous was skipped too, then nothing to do! */ | |||||
if (*mbskip_ptr >= age){ | |||||
//if(s->pict_type!=B_TYPE && s->mb_x==0) printf("\n"); | |||||
//if(s->pict_type!=B_TYPE) printf("%d%d ", *mbskip_ptr, age); | |||||
if(s->pict_type!=B_TYPE) return; | |||||
if(s->avctx->draw_horiz_band==NULL && *mbskip_ptr > age) return; | |||||
/* we don't draw complete frames here, so we can't skip */ | |||||
} | |||||
} else { | } else { | ||||
*mbskip_ptr = 0; /* not skipped */ | *mbskip_ptr = 0; /* not skipped */ | ||||
} | } | ||||
} | |||||
}else | |||||
s->mb_skiped= 0; | |||||
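The rewritten skip logic drops the old ip_buffer_count test in favour of buffer age: mbskip_table[xy] counts how many consecutive frames skipped this macroblock, and pic->age says how many coded frames ago the buffer now being written last held output. If the macroblock has been skipped at least age times, the recycled buffer already contains the right pixels and the copy can be elided, subject to the extra B-frame and draw_horiz_band guards above. The core test in isolation:

    /* skip_count: consecutive skips of this MB (saturated at 99 above);
       age: frames since this output buffer was last written (>= 1) */
    static int can_elide_copy(int skip_count, int age)
    {
        return skip_count >= age;  /* the buffer already holds these pixels */
    }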
if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band){ | if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band){ | ||||
dest_y = s->current_picture [0] + mb_x * 16; | |||||
dest_cb = s->current_picture[1] + mb_x * 8; | |||||
dest_cr = s->current_picture[2] + mb_x * 8; | |||||
dest_y = s->current_picture.data[0] + mb_x * 16; | |||||
dest_cb = s->current_picture.data[1] + mb_x * 8; | |||||
dest_cr = s->current_picture.data[2] + mb_x * 8; | |||||
}else{ | }else{ | ||||
dest_y = s->current_picture [0] + (mb_y * 16* s->linesize ) + mb_x * 16; | |||||
dest_cb = s->current_picture[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; | |||||
dest_cr = s->current_picture[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; | |||||
dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16; | |||||
dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; | |||||
dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; | |||||
} | } | ||||
if (s->interlaced_dct) { | if (s->interlaced_dct) { | ||||
@@ -1873,12 +1889,12 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) | |||||
} | } | ||||
if (s->mv_dir & MV_DIR_FORWARD) { | if (s->mv_dir & MV_DIR_FORWARD) { | ||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix); | |||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); | |||||
op_pix = s->dsp.avg_pixels_tab; | op_pix = s->dsp.avg_pixels_tab; | ||||
op_qpix= s->dsp.avg_qpel_pixels_tab; | op_qpix= s->dsp.avg_qpel_pixels_tab; | ||||
} | } | ||||
if (s->mv_dir & MV_DIR_BACKWARD) { | if (s->mv_dir & MV_DIR_BACKWARD) { | ||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix); | |||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); | |||||
} | } | ||||
} | } | ||||
@@ -2154,13 +2170,13 @@ void ff_draw_horiz_band(MpegEncContext *s){ | |||||
offset = y * s->linesize; | offset = y * s->linesize; | ||||
if(s->pict_type==B_TYPE || (!s->has_b_frames)){ | if(s->pict_type==B_TYPE || (!s->has_b_frames)){ | ||||
src_ptr[0] = s->current_picture[0] + offset; | |||||
src_ptr[1] = s->current_picture[1] + (offset >> 2); | |||||
src_ptr[2] = s->current_picture[2] + (offset >> 2); | |||||
src_ptr[0] = s->current_picture.data[0] + offset; | |||||
src_ptr[1] = s->current_picture.data[1] + (offset >> 2); | |||||
src_ptr[2] = s->current_picture.data[2] + (offset >> 2); | |||||
} else { | } else { | ||||
src_ptr[0] = s->last_picture[0] + offset; | |||||
src_ptr[1] = s->last_picture[1] + (offset >> 2); | |||||
src_ptr[2] = s->last_picture[2] + (offset >> 2); | |||||
src_ptr[0] = s->last_picture.data[0] + offset; | |||||
src_ptr[1] = s->last_picture.data[1] + (offset >> 2); | |||||
src_ptr[2] = s->last_picture.data[2] + (offset >> 2); | |||||
} | } | ||||
emms_c(); | emms_c(); | ||||
@@ -2180,7 +2196,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) | |||||
for(i=0; i<6; i++) skip_dct[i]=0; | for(i=0; i<6; i++) skip_dct[i]=0; | ||||
if(s->adaptive_quant){ | if(s->adaptive_quant){ | ||||
s->dquant= s->qscale_table[mb_x + mb_y*s->mb_width] - s->qscale; | |||||
s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_width] - s->qscale; | |||||
if(s->out_format==FMT_H263){ | if(s->out_format==FMT_H263){ | ||||
if (s->dquant> 2) s->dquant= 2; | if (s->dquant> 2) s->dquant= 2; | ||||
@@ -2206,7 +2222,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) | |||||
int emu=0; | int emu=0; | ||||
wrap_y = s->linesize; | wrap_y = s->linesize; | ||||
ptr = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16; | |||||
ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; | |||||
if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ | if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ | ||||
emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); | emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); | ||||
@@ -2239,14 +2255,14 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) | |||||
skip_dct[5]= 1; | skip_dct[5]= 1; | ||||
}else{ | }else{ | ||||
int wrap_c = s->uvlinesize; | int wrap_c = s->uvlinesize; | ||||
ptr = s->new_picture[1] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
if(emu){ | if(emu){ | ||||
emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); | emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); | ||||
ptr= s->edge_emu_buffer; | ptr= s->edge_emu_buffer; | ||||
} | } | ||||
s->dsp.get_pixels(s->block[4], ptr, wrap_c); | s->dsp.get_pixels(s->block[4], ptr, wrap_c); | ||||
ptr = s->new_picture[2] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
if(emu){ | if(emu){ | ||||
emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); | emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); | ||||
ptr= s->edge_emu_buffer; | ptr= s->edge_emu_buffer; | ||||
@@ -2261,14 +2277,14 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) | |||||
int wrap_y, wrap_c; | int wrap_y, wrap_c; | ||||
int emu=0; | int emu=0; | ||||
dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize ) + mb_x * 16; | |||||
dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; | |||||
dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; | |||||
dest_y = s->current_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16; | |||||
dest_cb = s->current_picture.data[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; | |||||
dest_cr = s->current_picture.data[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; | |||||
wrap_y = s->linesize; | wrap_y = s->linesize; | ||||
wrap_c = s->uvlinesize; | wrap_c = s->uvlinesize; | ||||
ptr_y = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16; | |||||
ptr_cb = s->new_picture[1] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
ptr_cr = s->new_picture[2] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; | |||||
ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8; | |||||
if ((!s->no_rounding) || s->pict_type==B_TYPE){ | if ((!s->no_rounding) || s->pict_type==B_TYPE){ | ||||
op_pix = s->dsp.put_pixels_tab; | op_pix = s->dsp.put_pixels_tab; | ||||
@@ -2279,12 +2295,12 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) | |||||
} | } | ||||
if (s->mv_dir & MV_DIR_FORWARD) { | if (s->mv_dir & MV_DIR_FORWARD) { | ||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix); | |||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); | |||||
op_pix = s->dsp.avg_pixels_tab; | op_pix = s->dsp.avg_pixels_tab; | ||||
op_qpix= s->dsp.avg_qpel_pixels_tab; | op_qpix= s->dsp.avg_qpel_pixels_tab; | ||||
} | } | ||||
if (s->mv_dir & MV_DIR_BACKWARD) { | if (s->mv_dir & MV_DIR_BACKWARD) { | ||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix); | |||||
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); | |||||
} | } | ||||
if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ | if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ | ||||
@@ -2330,9 +2346,8 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) | |||||
} | } | ||||
s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); | s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); | ||||
} | } | ||||
/* pre quantization */ | /* pre quantization */ | ||||
if(s->mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){ | |||||
if(s->current_picture.mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){ | |||||
//FIXME optimize | //FIXME optimize | ||||
if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1; | if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1; | ||||
if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1; | if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1; | ||||
@@ -2557,8 +2572,8 @@ static void encode_picture(MpegEncContext *s, int picture_number) | |||||
s->block_wrap[5]= s->mb_width + 2; | s->block_wrap[5]= s->mb_width + 2; | ||||
/* Reset the average MB variance */ | /* Reset the average MB variance */ | ||||
s->mb_var_sum = 0; | |||||
s->mc_mb_var_sum = 0; | |||||
s->current_picture.mb_var_sum = 0; | |||||
s->current_picture.mc_mb_var_sum = 0; | |||||
/* we need to initialize some time vars before we can encode b-frames */ | /* we need to initialize some time vars before we can encode b-frames */ | ||||
if (s->h263_pred && !s->h263_msmpeg4) | if (s->h263_pred && !s->h263_msmpeg4) | ||||
@@ -2604,15 +2619,15 @@ static void encode_picture(MpegEncContext *s, int picture_number) | |||||
for(mb_x=0; mb_x < s->mb_width; mb_x++) { | for(mb_x=0; mb_x < s->mb_width; mb_x++) { | ||||
int xx = mb_x * 16; | int xx = mb_x * 16; | ||||
int yy = mb_y * 16; | int yy = mb_y * 16; | ||||
uint8_t *pix = s->new_picture[0] + (yy * s->linesize) + xx; | |||||
uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx; | |||||
int varc; | int varc; | ||||
int sum = s->dsp.pix_sum(pix, s->linesize); | int sum = s->dsp.pix_sum(pix, s->linesize); | ||||
varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; | varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; | ||||
s->mb_var [s->mb_width * mb_y + mb_x] = varc; | |||||
s->mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8; | |||||
s->mb_var_sum += varc; | |||||
s->current_picture.mb_var [s->mb_width * mb_y + mb_x] = varc; | |||||
s->current_picture.mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8; | |||||
s->current_picture.mb_var_sum += varc; | |||||
} | } | ||||
} | } | ||||
} | } | ||||
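The varc expression above is the usual shortcut variance over the 256 luma samples of a 16x16 block, Var = (sum(x^2) - (sum(x))^2/256) / 256, where dsp.pix_norm1() supplies the sum of squares; the +128 rounds the final >>8 and the +500 appears to be a small bias against near-zero variances. As a standalone function:

    /* sum: sum of the 256 samples of a 16x16 block; sqsum: sum of their
       squares (what dsp.pix_norm1() returns); both fit in 32 bits */
    static int mb_variance(unsigned sum, unsigned sqsum)
    {
        return (sqsum - ((sum * sum) >> 8) + 500 + 128) >> 8;
    }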
@@ -2622,13 +2637,9 @@ static void encode_picture(MpegEncContext *s, int picture_number) | |||||
if(s->scene_change_score > 0 && s->pict_type == P_TYPE){ | if(s->scene_change_score > 0 && s->pict_type == P_TYPE){ | ||||
s->pict_type= I_TYPE; | s->pict_type= I_TYPE; | ||||
memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height); | memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height); | ||||
if(s->max_b_frames==0){ | |||||
s->input_pict_type= I_TYPE; | |||||
s->input_picture_in_gop_number=0; | |||||
} | |||||
//printf("Scene change detected, encoding as I Frame %d %d\n", s->mb_var_sum, s->mc_mb_var_sum); | |||||
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); | |||||
} | } | ||||
if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) | if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) | ||||
s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER); | s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER); | ||||
ff_fix_long_p_mvs(s); | ff_fix_long_p_mvs(s); | ||||
@@ -2643,7 +2654,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) | |||||
} | } | ||||
if (s->fixed_qscale) | if (s->fixed_qscale) | ||||
s->frame_qscale = s->avctx->quality; | |||||
s->frame_qscale = s->current_picture.quality; | |||||
else | else | ||||
s->frame_qscale = ff_rate_estimate_qscale(s); | s->frame_qscale = ff_rate_estimate_qscale(s); | ||||
@@ -2658,7 +2669,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) | |||||
break; | break; | ||||
} | } | ||||
s->qscale= s->qscale_table[0]; | |||||
s->qscale= s->current_picture.qscale_table[0]; | |||||
}else | }else | ||||
s->qscale= (int)(s->frame_qscale + 0.5); | s->qscale= (int)(s->frame_qscale + 0.5); | ||||
@@ -2673,6 +2684,13 @@ static void encode_picture(MpegEncContext *s, int picture_number) | |||||
convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, | convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, | ||||
s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8); | s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8); | ||||
} | } | ||||
//FIXME var duplication | |||||
s->current_picture.key_frame= s->pict_type == I_TYPE; | |||||
s->current_picture.pict_type= s->pict_type; | |||||
if(s->current_picture.key_frame) | |||||
s->picture_in_gop_number=0; | |||||
s->last_bits= get_bit_count(&s->pb); | s->last_bits= get_bit_count(&s->pb); | ||||
switch(s->out_format) { | switch(s->out_format) { | ||||
@@ -28,6 +28,8 @@ enum OutputFormat { | |||||
FMT_MJPEG, | FMT_MJPEG, | ||||
}; | }; | ||||
#define EDGE_WIDTH 16 | |||||
#define MPEG_BUF_SIZE (16 * 1024) | #define MPEG_BUF_SIZE (16 * 1024) | ||||
#define QMAT_SHIFT_MMX 16 | #define QMAT_SHIFT_MMX 16 | ||||
@@ -35,7 +37,8 @@ enum OutputFormat { | |||||
#define MAX_FCODE 7 | #define MAX_FCODE 7 | ||||
#define MAX_MV 2048 | #define MAX_MV 2048 | ||||
#define REORDER_BUFFER_SIZE (FF_MAX_B_FRAMES+2) | |||||
#define MAX_PICTURE_COUNT 7 | |||||
#define ME_MAP_SIZE 64 | #define ME_MAP_SIZE 64 | ||||
#define ME_MAP_SHIFT 3 | #define ME_MAP_SHIFT 3 | ||||
@@ -90,14 +93,6 @@ typedef struct RateControlContext{ | |||||
int last_non_b_pict_type; | int last_non_b_pict_type; | ||||
}RateControlContext; | }RateControlContext; | ||||
typedef struct ReorderBuffer{ | |||||
UINT8 *picture[3]; | |||||
int pict_type; | |||||
int qscale; | |||||
int force_type; | |||||
int picture_number; | |||||
int picture_in_gop_number; | |||||
} ReorderBuffer; | |||||
typedef struct ScanTable{ | typedef struct ScanTable{ | ||||
const UINT8 *scantable; | const UINT8 *scantable; | ||||
@@ -109,6 +104,16 @@ typedef struct ScanTable{ | |||||
#endif | #endif | ||||
} ScanTable; | } ScanTable; | ||||
typedef struct Picture{ | |||||
FF_COMMON_PICTURE | |||||
int mb_var_sum; /* sum of MB variance for current frame */ | |||||
int mc_mb_var_sum; /* motion compensated MB variance for current frame */ | |||||
uint16_t *mb_var; /* Table for MB variances */ | |||||
uint16_t *mc_mb_var; /* Table for motion compensated MB variances */ | |||||
uint8_t *mb_mean; /* Table for MB luminance */ | |||||
} Picture; | |||||
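Picture works by embedding FF_COMMON_PICTURE, presumably the field list of AVVideoFrame, as its leading members, which is why the surrounding code can cast Picture* to AVVideoFrame* freely. The pattern reduced to a sketch; the names here are illustrative, not the real macro contents:

    /* C-style subtyping via a common prefix: both structs begin with the
       same members, so casting Extended* to Base* works in practice */
    #define COMMON_FRAME_FIELDS \
        unsigned char *data[4]; \
        int linesize[4];

    typedef struct Base     { COMMON_FRAME_FIELDS } Base;
    typedef struct Extended { COMMON_FRAME_FIELDS int mb_var_sum; } Extended;

    static int first_stride(Base *b) { return b->linesize[0]; }
    /* usage: Extended e; first_stride((Base *)&e); */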
typedef struct ParseContext{ | typedef struct ParseContext{ | ||||
UINT8 *buffer; | UINT8 *buffer; | ||||
int index; | int index; | ||||
@@ -145,7 +150,6 @@ typedef struct MpegEncContext { | |||||
int max_qdiff; /* max qscale difference between frames */ | int max_qdiff; /* max qscale difference between frames */ | ||||
int encoding; /* true if we are encoding (vs decoding) */ | int encoding; /* true if we are encoding (vs decoding) */ | ||||
int flags; /* AVCodecContext.flags (HQ, MV4, ...) */ | int flags; /* AVCodecContext.flags (HQ, MV4, ...) */ | ||||
int force_input_type;/* 0= no force, otherwise I_TYPE, P_TYPE, ... */ | |||||
int max_b_frames; /* max number of b-frames for encoding */ | int max_b_frames; /* max number of b-frames for encoding */ | ||||
int b_frame_strategy; | int b_frame_strategy; | ||||
int luma_elim_threshold; | int luma_elim_threshold; | ||||
@@ -160,10 +164,7 @@ typedef struct MpegEncContext { | |||||
/* sequence parameters */ | /* sequence parameters */ | ||||
int context_initialized; | int context_initialized; | ||||
int input_picture_number; | int input_picture_number; | ||||
int input_picture_in_gop_number; /* 0-> first pic in gop, ... */ | |||||
int picture_number; | int picture_number; | ||||
int fake_picture_number; /* picture number at the bitstream frame rate */ | |||||
int gop_picture_number; /* index of the first picture of a GOP based on fake_pic_num & mpeg1 specific */ | |||||
int picture_in_gop_number; /* 0-> first pic in gop, ... */ | int picture_in_gop_number; /* 0-> first pic in gop, ... */ | ||||
int b_frames_since_non_b; /* used for encoding, relative to not yet reordered input */ | int b_frames_since_non_b; /* used for encoding, relative to not yet reordered input */ | ||||
int mb_width, mb_height; /* number of MBs horizontally & vertically */ | int mb_width, mb_height; /* number of MBs horizontally & vertically */ | ||||
@@ -171,20 +172,13 @@ typedef struct MpegEncContext { | |||||
int mb_num; /* number of MBs of a picture */ | int mb_num; /* number of MBs of a picture */ | ||||
int linesize; /* line size, in bytes, may be different from width */ | int linesize; /* line size, in bytes, may be different from width */ | ||||
int uvlinesize; /* line size, for chroma in bytes, may be different from width */ | int uvlinesize; /* line size, for chroma in bytes, may be different from width */ | ||||
UINT8 *new_picture[3]; /* picture to be compressed */ | |||||
UINT8 *picture_buffer[REORDER_BUFFER_SIZE][3]; /* internal buffers used for reordering of input pictures */ | |||||
int picture_buffer_index; | |||||
ReorderBuffer coded_order[REORDER_BUFFER_SIZE]; | |||||
UINT8 *last_picture[3]; /* previous picture */ | |||||
UINT8 *last_picture_base[3]; /* real start of the picture */ | |||||
UINT8 *next_picture[3]; /* previous picture (for bidir pred) */ | |||||
UINT8 *next_picture_base[3]; /* real start of the picture */ | |||||
UINT8 *aux_picture[3]; /* aux picture (for B frames only) */ | |||||
UINT8 *aux_picture_base[3]; /* real start of the picture */ | |||||
UINT8 *current_picture[3]; /* buffer to store the decompressed current picture */ | |||||
void *last_dr_opaque; | |||||
void *next_dr_opaque; | |||||
int ip_buffer_count; /* number of buffers, currently only >2 if dr1 is used */ | |||||
Picture picture[MAX_PICTURE_COUNT]; /* main picture buffer */ | |||||
Picture *input_picture[MAX_PICTURE_COUNT]; /* next pictures in display order, for encoding */ | |||||
Picture *reordered_input_picture[MAX_PICTURE_COUNT]; /* pointers to the next pictures in coded order, for encoding */ | |||||
Picture last_picture; /* previous picture */ | |||||
Picture next_picture; /* next picture (for bidir prediction) */ | |||||
Picture new_picture; /* source picture for encoding */ | |||||
Picture current_picture; /* buffer to store the decompressed current picture */ | |||||
int num_available_buffers; /* 0 at start & after seeking; 1 after the first I frame, 2 after the next I/P */ | int num_available_buffers; /* 0 at start & after seeking; 1 after the first I frame, 2 after the next I/P */ | ||||
int last_dc[3]; /* last DC values for MPEG1 */ | int last_dc[3]; /* last DC values for MPEG1 */ | ||||
INT16 *dc_val[3]; /* used for mpeg4 DC prediction, all 3 arrays must be continuous */ | INT16 *dc_val[3]; /* used for mpeg4 DC prediction, all 3 arrays must be continuous */ | ||||
@@ -200,17 +194,10 @@ typedef struct MpegEncContext { | |||||
UINT8 *mbintra_table; /* used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding */ | UINT8 *mbintra_table; /* used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding */ | ||||
UINT8 *cbp_table; /* used to store cbp, ac_pred for partitioned decoding */ | UINT8 *cbp_table; /* used to store cbp, ac_pred for partitioned decoding */ | ||||
UINT8 *pred_dir_table; /* used to store pred_dir for partitioned decoding */ | UINT8 *pred_dir_table; /* used to store pred_dir for partitioned decoding */ | ||||
INT8 *qscale_table; /* used to store qscale */ | |||||
INT8 *aux_qscale_table; | |||||
INT8 *next_qscale_table; | |||||
INT8 *last_qscale_table; //FIXME move these into some picture struct (MpegEncContext.aux.qscale_table[]) | |||||
UINT8 *edge_emu_buffer; | UINT8 *edge_emu_buffer; | ||||
int input_qscale; /* qscale prior to reordering of frames */ | |||||
int input_pict_type; /* pict_type prior to reordering of frames */ | |||||
int force_type; /* 0= no force, otherwise I_TYPE, P_TYPE, ... */ | |||||
int qscale; /* QP */ | int qscale; /* QP */ | ||||
float frame_qscale; /* qscale from the frame level rc */ | |||||
float frame_qscale; /* qscale from the frame level rc FIXME remove*/ | |||||
int adaptive_quant; /* use adaptive quantization */ | int adaptive_quant; /* use adaptive quantization */ | ||||
int dquant; /* qscale difference to prev qscale */ | int dquant; /* qscale difference to prev qscale */ | ||||
int pict_type; /* I_TYPE, P_TYPE, B_TYPE, ... */ | int pict_type; /* I_TYPE, P_TYPE, B_TYPE, ... */ | ||||
@@ -272,9 +259,6 @@ typedef struct MpegEncContext { | |||||
int mb_x, mb_y; | int mb_x, mb_y; | ||||
int mb_incr; | int mb_incr; | ||||
int mb_intra; | int mb_intra; | ||||
UINT16 *mb_var; /* Table for MB variances */ | |||||
UINT16 *mc_mb_var; /* Table for motion compensated MB variances */ | |||||
UINT8 *mb_mean; /* Table for MB luminance */ | |||||
UINT8 *mb_type; /* Table for MB type */ | UINT8 *mb_type; /* Table for MB type */ | ||||
#define MB_TYPE_INTRA 0x01 | #define MB_TYPE_INTRA 0x01 | ||||
#define MB_TYPE_INTER 0x02 | #define MB_TYPE_INTER 0x02 | ||||
@@ -325,8 +309,6 @@ typedef struct MpegEncContext { | |||||
/* bit rate control */ | /* bit rate control */ | ||||
int I_frame_bits; //FIXME used in mpeg12 ... | int I_frame_bits; //FIXME used in mpeg12 ... | ||||
int mb_var_sum; /* sum of MB variance for current frame */ | |||||
int mc_mb_var_sum; /* motion compensated MB variance for current frame */ | |||||
INT64 wanted_bits; | INT64 wanted_bits; | ||||
INT64 total_bits; | INT64 total_bits; | ||||
int frame_bits; /* bits used for the current frame */ | int frame_bits; /* bits used for the current frame */ | ||||
@@ -476,6 +458,10 @@ typedef struct MpegEncContext { | |||||
/* decompression specific */ | /* decompression specific */ | ||||
GetBitContext gb; | GetBitContext gb; | ||||
/* Mpeg1 specific */ | |||||
int fake_picture_number; /* picture number at the bitstream frame rate */ | |||||
int gop_picture_number; /* index of the first picture of a GOP, based on fake_picture_number; mpeg1 specific */ | |||||
/* MPEG2 specific - I wish I didn't have to support this mess. */ | /* MPEG2 specific - I wish I didn't have to support this mess. */ | ||||
int progressive_sequence; | int progressive_sequence; | ||||
int mpeg_f_code[2][2]; | int mpeg_f_code[2][2]; | ||||
@@ -498,7 +484,6 @@ typedef struct MpegEncContext { | |||||
int mpeg2; | int mpeg2; | ||||
int full_pel[2]; | int full_pel[2]; | ||||
int interlaced_dct; | int interlaced_dct; | ||||
int last_qscale; | |||||
int first_slice; | int first_slice; | ||||
/* RTP specific */ | /* RTP specific */ | ||||
@@ -759,10 +759,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n, | |||||
}else{ | }else{ | ||||
if(n<4){ | if(n<4){ | ||||
wrap= s->linesize; | wrap= s->linesize; | ||||
dest= s->current_picture[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8; | |||||
dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8; | |||||
}else{ | }else{ | ||||
wrap= s->uvlinesize; | wrap= s->uvlinesize; | ||||
dest= s->current_picture[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8; | |||||
dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8; | |||||
} | } | ||||
if(s->mb_x==0) a= (1024 + (scale>>1))/scale; | if(s->mb_x==0) a= (1024 + (scale>>1))/scale; | ||||
else a= get_dc(dest-8, wrap, scale*8); | else a= get_dc(dest-8, wrap, scale*8); | ||||
@@ -41,7 +41,7 @@ void ff_write_pass1_stats(MpegEncContext *s){ | |||||
sprintf(s->avctx->stats_out, "in:%d out:%d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d;\n", | sprintf(s->avctx->stats_out, "in:%d out:%d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d;\n", | ||||
s->picture_number, s->input_picture_number - s->max_b_frames, s->pict_type, | s->picture_number, s->input_picture_number - s->max_b_frames, s->pict_type, | ||||
s->frame_qscale, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits, | s->frame_qscale, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits, | ||||
s->f_code, s->b_code, s->mc_mb_var_sum, s->mb_var_sum, s->i_count); | |||||
s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count); | |||||
} | } | ||||
int ff_rate_control_init(MpegEncContext *s) | int ff_rate_control_init(MpegEncContext *s) | ||||
@@ -475,11 +475,12 @@ static void adaptive_quantization(MpegEncContext *s, double q){ | |||||
float bits_tab[s->mb_num]; | float bits_tab[s->mb_num]; | ||||
const int qmin= 2; //s->avctx->mb_qmin; | const int qmin= 2; //s->avctx->mb_qmin; | ||||
const int qmax= 31; //s->avctx->mb_qmax; | const int qmax= 31; //s->avctx->mb_qmax; | ||||
Picture * const pic= &s->current_picture; | |||||
for(i=0; i<s->mb_num; i++){ | for(i=0; i<s->mb_num; i++){ | ||||
float temp_cplx= sqrt(s->mc_mb_var[i]); | |||||
float spat_cplx= sqrt(s->mb_var[i]); | |||||
const int lumi= s->mb_mean[i]; | |||||
float temp_cplx= sqrt(pic->mc_mb_var[i]); | |||||
float spat_cplx= sqrt(pic->mb_var[i]); | |||||
const int lumi= pic->mb_mean[i]; | |||||
float bits, cplx, factor; | float bits, cplx, factor; | ||||
if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune | if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune | ||||
@@ -533,8 +534,8 @@ static void adaptive_quantization(MpegEncContext *s, double q){ | |||||
newq*= bits_sum/cplx_sum; | newq*= bits_sum/cplx_sum; | ||||
} | } | ||||
if(i && ABS(s->qscale_table[i-1] - newq)<0.75) | |||||
intq= s->qscale_table[i-1]; | |||||
if(i && ABS(pic->qscale_table[i-1] - newq)<0.75) | |||||
intq= pic->qscale_table[i-1]; | |||||
else | else | ||||
intq= (int)(newq + 0.5); | intq= (int)(newq + 0.5); | ||||
@@ -542,7 +543,7 @@ static void adaptive_quantization(MpegEncContext *s, double q){ | |||||
else if(intq < qmin) intq= qmin; | else if(intq < qmin) intq= qmin; | ||||
//if(i%s->mb_width==0) printf("\n"); | //if(i%s->mb_width==0) printf("\n"); | ||||
//printf("%2d%3d ", intq, ff_sqrt(s->mc_mb_var[i])); | //printf("%2d%3d ", intq, ff_sqrt(s->mc_mb_var[i])); | ||||
s->qscale_table[i]= intq; | |||||
pic->qscale_table[i]= intq; | |||||
} | } | ||||
} | } | ||||
@@ -562,6 +563,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) | |||||
double rate_factor; | double rate_factor; | ||||
int var; | int var; | ||||
const int pict_type= s->pict_type; | const int pict_type= s->pict_type; | ||||
Picture * const pic= &s->current_picture; | |||||
emms_c(); | emms_c(); | ||||
get_qminmax(&qmin, &qmax, s, pict_type); | get_qminmax(&qmin, &qmax, s, pict_type); | ||||
@@ -588,7 +590,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) | |||||
br_compensation= (s->bit_rate_tolerance - diff)/s->bit_rate_tolerance; | br_compensation= (s->bit_rate_tolerance - diff)/s->bit_rate_tolerance; | ||||
if(br_compensation<=0.0) br_compensation=0.001; | if(br_compensation<=0.0) br_compensation=0.001; | ||||
var= pict_type == I_TYPE ? s->mb_var_sum : s->mc_mb_var_sum; | |||||
var= pict_type == I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum; | |||||
if(s->flags&CODEC_FLAG_PASS2){ | if(s->flags&CODEC_FLAG_PASS2){ | ||||
if(pict_type!=I_TYPE) | if(pict_type!=I_TYPE) | ||||
@@ -599,8 +601,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s) | |||||
}else{ | }else{ | ||||
rce->pict_type= | rce->pict_type= | ||||
rce->new_pict_type= pict_type; | rce->new_pict_type= pict_type; | ||||
rce->mc_mb_var_sum= s->mc_mb_var_sum; | |||||
rce->mb_var_sum = s-> mb_var_sum; | |||||
rce->mc_mb_var_sum= pic->mc_mb_var_sum; | |||||
rce->mb_var_sum = pic-> mb_var_sum; | |||||
rce->qscale = 2; | rce->qscale = 2; | ||||
rce->f_code = s->f_code; | rce->f_code = s->f_code; | ||||
rce->b_code = s->b_code; | rce->b_code = s->b_code; | ||||
@@ -663,10 +665,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s) | |||||
else if(q>qmax) q=qmax; | else if(q>qmax) q=qmax; | ||||
// printf("%f %d %d %d\n", q, picture_number, (int)wanted_bits, (int)s->total_bits); | // printf("%f %d %d %d\n", q, picture_number, (int)wanted_bits, (int)s->total_bits); | ||||
//printf("%f %f %f\n", q, br_compensation, short_term_q); | |||||
//printf("q:%d diff:%d comp:%f st_q:%f last_size:%d type:%d\n", qscale, (int)diff, br_compensation, | |||||
//printf("diff:%d comp:%f st_q:%f last_size:%d type:%d\n", (int)diff, br_compensation, | |||||
// short_term_q, s->frame_bits, pict_type); | // short_term_q, s->frame_bits, pict_type); | ||||
//printf("%d %d\n", s->bit_rate, (int)fps); | //printf("%d %d\n", s->bit_rate, (int)fps); | ||||
@@ -676,8 +676,16 @@ float ff_rate_estimate_qscale(MpegEncContext *s) | |||||
q= (int)(q + 0.5); | q= (int)(q + 0.5); | ||||
rcc->last_qscale= q; | rcc->last_qscale= q; | ||||
rcc->last_mc_mb_var_sum= s->mc_mb_var_sum; | |||||
rcc->last_mb_var_sum= s->mb_var_sum; | |||||
rcc->last_mc_mb_var_sum= pic->mc_mb_var_sum; | |||||
rcc->last_mb_var_sum= pic->mb_var_sum; | |||||
#if 0 | |||||
{ | |||||
static int mvsum=0, texsum=0; | |||||
mvsum += s->mv_bits; | |||||
texsum += s->i_tex_bits + s->p_tex_bits; | |||||
printf("%d %d//\n\n", mvsum, texsum); | |||||
} | |||||
#endif | |||||
return q; | return q; | ||||
} | } | ||||
@@ -472,7 +472,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, | |||||
{ | { | ||||
MpegEncContext *s = avctx->priv_data; | MpegEncContext *s = avctx->priv_data; | ||||
int i; | int i; | ||||
AVPicture *pict = data; | |||||
AVVideoFrame *pict = data; | |||||
#ifdef DEBUG | #ifdef DEBUG | ||||
printf("*****frame %d size=%d\n", avctx->frame_number, buf_size); | printf("*****frame %d size=%d\n", avctx->frame_number, buf_size); | ||||
@@ -505,15 +505,9 @@ static int rv10_decode_frame(AVCodecContext *avctx, | |||||
if(s->mb_y>=s->mb_height){ | if(s->mb_y>=s->mb_height){ | ||||
MPV_frame_end(s); | MPV_frame_end(s); | ||||
pict->data[0] = s->current_picture[0]; | |||||
pict->data[1] = s->current_picture[1]; | |||||
pict->data[2] = s->current_picture[2]; | |||||
pict->linesize[0] = s->linesize; | |||||
pict->linesize[1] = s->uvlinesize; | |||||
pict->linesize[2] = s->uvlinesize; | |||||
*pict= *(AVVideoFrame*)&s->current_picture; | |||||
avctx->quality = s->qscale; | |||||
*data_size = sizeof(AVPicture); | |||||
*data_size = sizeof(AVVideoFrame); | |||||
}else{ | }else{ | ||||
*data_size = 0; | *data_size = 0; | ||||
} | } | ||||
@@ -1063,7 +1063,7 @@ static int svq1_decode_frame(AVCodecContext *avctx, | |||||
MpegEncContext *s=avctx->priv_data; | MpegEncContext *s=avctx->priv_data; | ||||
uint8_t *current, *previous; | uint8_t *current, *previous; | ||||
int result, i, x, y, width, height; | int result, i, x, y, width, height; | ||||
AVPicture *pict = data; | |||||
AVVideoFrame *pict = data; | |||||
/* initialize bit buffer */ | /* initialize bit buffer */ | ||||
init_get_bits(&s->gb,buf,buf_size); | init_get_bits(&s->gb,buf,buf_size); | ||||
@@ -1084,9 +1084,6 @@ static int svq1_decode_frame(AVCodecContext *avctx, | |||||
} | } | ||||
result = svq1_decode_frame_header (&s->gb, s); | result = svq1_decode_frame_header (&s->gb, s); | ||||
if(MPV_frame_start(s, avctx) < 0) | |||||
return -1; | |||||
if (result != 0) | if (result != 0) | ||||
{ | { | ||||
@@ -1098,6 +1095,9 @@ static int svq1_decode_frame(AVCodecContext *avctx, | |||||
if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size; | if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size; | ||||
if(MPV_frame_start(s, avctx) < 0) | |||||
return -1; | |||||
/* decode y, u and v components */ | /* decode y, u and v components */ | ||||
for (i=0; i < 3; i++) { | for (i=0; i < 3; i++) { | ||||
int linesize; | int linesize; | ||||
@@ -1112,12 +1112,12 @@ static int svq1_decode_frame(AVCodecContext *avctx, | |||||
linesize= s->uvlinesize; | linesize= s->uvlinesize; | ||||
} | } | ||||
current = s->current_picture[i]; | |||||
current = s->current_picture.data[i]; | |||||
if(s->pict_type==B_TYPE){ | if(s->pict_type==B_TYPE){ | ||||
previous = s->next_picture[i]; | |||||
previous = s->next_picture.data[i]; | |||||
}else{ | }else{ | ||||
previous = s->last_picture[i]; | |||||
previous = s->last_picture.data[i]; | |||||
} | } | ||||
if (s->pict_type == I_TYPE) { | if (s->pict_type == I_TYPE) { | ||||
@@ -1159,12 +1159,14 @@ static int svq1_decode_frame(AVCodecContext *avctx, | |||||
current += 16*linesize; | current += 16*linesize; | ||||
} | } | ||||
} | } | ||||
pict->data[i] = s->current_picture[i]; | |||||
pict->linesize[i] = linesize; | |||||
} | } | ||||
*pict = *(AVVideoFrame*)&s->current_picture; | |||||
*data_size=sizeof(AVPicture); | |||||
MPV_frame_end(s); | |||||
*data_size=sizeof(AVVideoFrame); | |||||
return buf_size; | return buf_size; | ||||
} | } | ||||
@@ -1176,7 +1178,6 @@ static int svq1_decode_init(AVCodecContext *avctx) | |||||
s->width = (avctx->width+3)&~3; | s->width = (avctx->width+3)&~3; | ||||
s->height = (avctx->height+3)&~3; | s->height = (avctx->height+3)&~3; | ||||
s->codec_id= avctx->codec->id; | s->codec_id= avctx->codec->id; | ||||
avctx->mbskip_table= s->mbskip_table; | |||||
avctx->pix_fmt = PIX_FMT_YUV410P; | avctx->pix_fmt = PIX_FMT_YUV410P; | ||||
avctx->has_b_frames= s->has_b_frames=1; // not true, but DP frames and these behave like unidirectional b frames | avctx->has_b_frames= s->has_b_frames=1; // not true, but DP frames and these behave like unidirectional b frames | ||||
s->flags= avctx->flags; | s->flags= avctx->flags; | ||||
@@ -86,6 +86,123 @@ void register_avcodec(AVCodec *format) | |||||
format->next = NULL; | format->next = NULL; | ||||
} | } | ||||
void avcodec_get_chroma_sub_sample(int fmt, int *h_shift, int *v_shift){ | |||||
switch(fmt){ | |||||
case PIX_FMT_YUV410P: | |||||
*h_shift=2; | |||||
*v_shift=2; | |||||
break; | |||||
case PIX_FMT_YUV420P: | |||||
*h_shift=1; | |||||
*v_shift=1; | |||||
break; | |||||
case PIX_FMT_YUV411P: | |||||
*h_shift=2; | |||||
*v_shift=0; | |||||
break; | |||||
case PIX_FMT_YUV422P: | |||||
case PIX_FMT_YUV422: | |||||
*h_shift=1; | |||||
*v_shift=0; | |||||
break; | |||||
default: //RGB/... | |||||
*h_shift=0; | |||||
*v_shift=0; | |||||
break; | |||||
} | |||||
} | |||||
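Given the two shifts, per-plane dimensions follow directly: chroma width is width>>h_shift and chroma height is height>>v_shift, so YUV420P (1,1) halves both, YUV411P (2,0) quarters only the width, and YUV410P (2,2) quarters both. A small helper built on the new call; the helper itself is hypothetical:

    /* derive the dimensions of one plane from the pixel format; plane 0 is luma */
    static void plane_size(int pix_fmt, int plane, int width, int height,
                           int *w, int *h)
    {
        int hs, vs;
        avcodec_get_chroma_sub_sample(pix_fmt, &hs, &vs);
        *w = plane ? width  >> hs : width;
        *h = plane ? height >> vs : height;
    }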
typedef struct DefaultPicOpaque{ | |||||
int last_pic_num; | |||||
uint8_t *data[4]; | |||||
}DefaultPicOpaque; | |||||
int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){ | |||||
int i; | |||||
const int width = s->width; | |||||
const int height= s->height; | |||||
DefaultPicOpaque *opaque; | |||||
if(pic->opaque){ | |||||
opaque= (DefaultPicOpaque *)pic->opaque; | |||||
for(i=0; i<3; i++) | |||||
pic->data[i]= opaque->data[i]; | |||||
// printf("get_buffer %X coded_pic_num:%d last:%d\n", pic->opaque, pic->coded_picture_number, opaque->last_pic_num); | |||||
pic->age= pic->coded_picture_number - opaque->last_pic_num; | |||||
opaque->last_pic_num= pic->coded_picture_number; | |||||
//printf("age: %d %d %d\n", pic->age, c->picture_number, pic->coded_picture_number); | |||||
}else{ | |||||
int align, h_chroma_shift, v_chroma_shift; | |||||
int w, h, pixel_size; | |||||
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); | |||||
switch(s->pix_fmt){ | |||||
case PIX_FMT_YUV422: | |||||
pixel_size=2; | |||||
break; | |||||
case PIX_FMT_RGB24: | |||||
case PIX_FMT_BGR24: | |||||
pixel_size=3; | |||||
break; | |||||
case PIX_FMT_BGRA32: | |||||
case PIX_FMT_RGBA32: | |||||
pixel_size=4; | |||||
break; | |||||
default: | |||||
pixel_size=1; | |||||
} | |||||
if(s->codec_id==CODEC_ID_SVQ1) align=63; | |||||
else align=15; | |||||
w= (width +align)&~align; | |||||
h= (height+align)&~align; | |||||
if(!(s->flags&CODEC_FLAG_EMU_EDGE)){ | |||||
w+= EDGE_WIDTH*2; | |||||
h+= EDGE_WIDTH*2; | |||||
} | |||||
opaque= av_mallocz(sizeof(DefaultPicOpaque)); | |||||
if(opaque==NULL) return -1; | |||||
pic->opaque= opaque; | |||||
opaque->last_pic_num= -256*256*256*64; | |||||
for(i=0; i<3; i++){ | |||||
int h_shift= i==0 ? 0 : h_chroma_shift; | |||||
int v_shift= i==0 ? 0 : v_chroma_shift; | |||||
pic->linesize[i]= pixel_size*w>>h_shift; | |||||
pic->base[i]= av_mallocz((pic->linesize[i]*h>>v_shift)+16); //FIXME 16 | |||||
if(pic->base[i]==NULL) return -1; | |||||
memset(pic->base[i], 128, pic->linesize[i]*h>>v_shift); | |||||
if(s->flags&CODEC_FLAG_EMU_EDGE) | |||||
pic->data[i] = pic->base[i]; | |||||
else | |||||
pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift); | |||||
opaque->data[i]= pic->data[i]; | |||||
} | |||||
pic->age= 256*256*256*64; | |||||
} | |||||
return 0; | |||||
} | |||||
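The geometry in the default allocator is worth spelling out: dimensions are rounded up to the alignment (16, or 64 for SVQ1), and unless CODEC_FLAG_EMU_EDGE is set an EDGE_WIDTH border is added on every side, with data[] pointing into the interior while base[] keeps the true allocation for freeing. The per-plane arithmetic in isolation, a sketch with pixel_size fixed to 1 and the +16 allocation slack omitted:

    /* align is 15 or 63 (round up to 16/64), edge is EDGE_WIDTH or 0;
       returns the bytes to allocate and the offset of data[] within base[] */
    static int plane_geometry(int width, int height, int align, int edge,
                              int h_shift, int v_shift,
                              int *linesize, int *data_offset)
    {
        int w = (width  + align) & ~align;   /* round up to the alignment */
        int h = (height + align) & ~align;
        w += 2 * edge;                       /* room for the prediction border */
        h += 2 * edge;
        *linesize    = w >> h_shift;
        *data_offset = (*linesize * edge >> v_shift) + (edge >> h_shift);
        return *linesize * h >> v_shift;     /* plane size in bytes */
    }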
void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic){ | |||||
int i; | |||||
for(i=0; i<3; i++) | |||||
pic->data[i]=NULL; | |||||
//printf("R%X\n", pic->opaque); | |||||
} | |||||
 void avcodec_get_context_defaults(AVCodecContext *s){
     s->bit_rate= 800*1000;
     s->bit_rate_tolerance= s->bit_rate*10;
@@ -104,6 +221,8 @@ void avcodec_get_context_defaults(AVCodecContext *s){
     s->frame_rate = 25 * FRAME_RATE_BASE;
     s->gop_size= 50;
     s->me_method= ME_EPZS;
+    s->get_buffer= avcodec_default_get_buffer;
+    s->release_buffer= avcodec_default_release_buffer;
 }

 /**
@@ -120,6 +239,16 @@ AVCodecContext *avcodec_alloc_context(void){
     return avctx;
 }
+/**
+ * Allocates an AVVideoFrame and sets it to defaults.
+ * The result can be deallocated by simply calling free().
+ */
+AVVideoFrame *avcodec_alloc_picture(void){
+    AVVideoFrame *pic= av_mallocz(sizeof(AVVideoFrame));
+
+    return pic;
+}
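
Typical use of the new allocator (sketch; per the comment above, a flat free() is enough because the frame itself owns no plane buffers until get_buffer runs):

    AVVideoFrame *picture = avcodec_alloc_picture();
    if (!picture)
        return -1;
    /* ... hand it to avcodec_decode_video(), see below ... */
    free(picture);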
 int avcodec_open(AVCodecContext *avctx, AVCodec *codec)
 {
     int ret;
@@ -152,7 +281,7 @@ int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
 }

 int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
-                         const AVPicture *pict)
+                         const AVVideoFrame *pict)
 {
     int ret;
@@ -167,17 +296,17 @@ int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
 /* decode a frame. return -1 if error, otherwise return the number of
    bytes used. If no frame could be decompressed, *got_picture_ptr is
    zero. Otherwise, it is non zero */
-int avcodec_decode_video(AVCodecContext *avctx, AVPicture *picture,
+int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
                          int *got_picture_ptr,
                          UINT8 *buf, int buf_size)
 {
     int ret;

     ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
                                buf, buf_size);

     emms_c(); //needed to avoid an emms_c() call before every return

     if (*got_picture_ptr)
         avctx->frame_number++;
     return ret;
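
Putting the pieces together, a decode loop against the new AVVideoFrame-based signature could look like this (sketch; avctx is an opened AVCodecContext and buf/buf_size one chunk of input bitstream):

    static int decode_chunk(AVCodecContext *avctx, AVVideoFrame *picture,
                            UINT8 *buf, int buf_size)
    {
        while (buf_size > 0) {
            int got_picture;
            int len = avcodec_decode_video(avctx, picture, &got_picture,
                                           buf, buf_size);
            if (len < 0)
                return -1;                       /* decode error */
            if (got_picture) {
                /* picture->data[]/linesize[] point at the buffer obtained
                 * through avctx->get_buffer; per-frame fields such as
                 * picture->key_frame and picture->quality are valid here */
            }
            buf      += len;
            buf_size -= len;
        }
        return 0;
    }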
@@ -556,7 +556,7 @@ static void put_frame_header(AVFormatContext *s, ASFStream *stream, int timestam
     int val;

     val = stream->num;
-    if (s->streams[val - 1]->codec.key_frame /* && frag_offset == 0 */)
+    if (s->streams[val - 1]->codec.coded_picture->key_frame /* && frag_offset == 0 */)
         val |= 0x80;
     put_byte(pb, val);
     put_byte(pb, stream->seq);
@@ -793,6 +793,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
         st = av_mallocz(sizeof(AVStream));
         if (!st)
             goto fail;
+        avcodec_get_context_defaults(&st->codec);
         s->streams[s->nb_streams] = st;
         asf_st = av_mallocz(sizeof(ASFStream));
         if (!asf_st)
@@ -143,6 +143,8 @@ static int au_read_header(AVFormatContext *s,
     st = av_malloc(sizeof(AVStream));
     if (!st)
         return -1;
+    avcodec_get_context_defaults(&st->codec);
+
     s->nb_streams = 1;
     s->streams[0] = st;
@@ -144,6 +144,9 @@ typedef struct AVStream {
     AVFrac pts;
     /* ffmpeg.c private use */
     int stream_copy; /* if TRUE, just copy stream */
+    /* quality, as it has been removed from AVCodecContext and put in AVVideoFrame.
+     * MN: dunno if that's the right place for it */
+    float quality;
 } AVStream;
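
On the encoding side the per-stream value is copied into each submitted frame. A sketch of that hand-off (encode_one() is illustrative; the cast relies on AVVideoFrame starting with the same data/linesize layout as AVPicture, which holds at this revision):

    static int encode_one(AVCodecContext *enc, AVStream *ost,
                          const AVPicture *in, UINT8 *out, int out_size)
    {
        AVVideoFrame frame;

        memset(&frame, 0, sizeof(AVVideoFrame));
        *(AVPicture *)&frame = *in;     /* share the plane pointers */
        frame.quality = ost->quality;   /* per-stream default becomes per-frame */
        return avcodec_encode_video(enc, out, out_size, &frame);
    }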
 #define MAX_STREAMS 20

@@ -103,6 +103,8 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
             AVStream *st = av_mallocz(sizeof(AVStream));
             if (!st)
                 goto fail;
+            avcodec_get_context_defaults(&st->codec);
+
             s->streams[i] = st;
         }
         url_fskip(pb, size - 7 * 4);
@@ -320,7 +320,7 @@ static int avi_write_packet(AVFormatContext *s, int stream_index,
     if (enc->codec_type == CODEC_TYPE_VIDEO) {
         tag[2] = 'd';
         tag[3] = 'c';
-        flags = enc->key_frame ? 0x10 : 0x00;
+        flags = enc->coded_picture->key_frame ? 0x10 : 0x00;
     } else {
         tag[2] = 'w';
         tag[3] = 'b';
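
The same relocation applies to every muxer that flagged key frames off the codec context. Since coded_picture is only meaningful once the encoder has produced a frame, a defensive variant (the NULL check is an addition for illustration, not in the tree at this revision) looks like:

    static int avi_video_flags(AVCodecContext *enc)
    {
        if (enc->codec_type == CODEC_TYPE_VIDEO &&
            enc->coded_picture && enc->coded_picture->key_frame)
            return 0x10;    /* AVIIF_KEYFRAME */
        return 0x00;
    }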
@@ -151,7 +151,7 @@ static int ffm_write_header(AVFormatContext *s)
         put_be32(pb, codec->codec_id);
         put_byte(pb, codec->codec_type);
         put_be32(pb, codec->bit_rate);
-        put_be32(pb, codec->quality);
+        put_be32(pb, st->quality);
         put_be32(pb, codec->flags);
         /* specific info */
         switch(codec->codec_type) {
@@ -232,7 +232,7 @@ static int ffm_write_packet(AVFormatContext *s, int stream_index,
     /* packet size & key_frame */
     header[0] = stream_index;
     header[1] = 0;
-    if (st->codec.key_frame)
+    if (st->codec.coded_picture->key_frame)
         header[1] |= FLAG_KEY_FRAME;
     header[2] = (size >> 16) & 0xff;
     header[3] = (size >> 8) & 0xff;
@@ -394,6 +394,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
         st = av_mallocz(sizeof(AVStream));
         if (!st)
             goto fail;
+        avcodec_get_context_defaults(&st->codec);
         s->streams[i] = st;
         fst = av_mallocz(sizeof(FFMStream));
         if (!fst)
@@ -405,7 +406,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
         st->codec.codec_id = get_be32(pb);
         st->codec.codec_type = get_byte(pb); /* codec_type */
         codec->bit_rate = get_be32(pb);
-        codec->quality = get_be32(pb);
+        st->quality = get_be32(pb);
         codec->flags = get_be32(pb);
         /* specific info */
         switch(codec->codec_type) {
@@ -170,6 +170,8 @@ static int jpeg_read_header(AVFormatContext *s1, AVFormatParameters *ap)
         av_free(s);
         return -ENOMEM;
     }
+    avcodec_get_context_defaults(&st->codec);
+
     s1->streams[0] = st;

     s->img_number = 0;
@@ -352,7 +352,7 @@ static int rm_write_video(AVFormatContext *s, UINT8 *buf, int size)
     RMContext *rm = s->priv_data;
     ByteIOContext *pb = &s->pb;
     StreamInfo *stream = rm->video_stream;
-    int key_frame = stream->enc->key_frame;
+    int key_frame = stream->enc->coded_picture->key_frame;

     /* XXX: this is incorrect: should be a parameter */
@@ -527,6 +527,7 @@ static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
             st = av_mallocz(sizeof(AVStream));
             if (!st)
                 goto fail;
+            avcodec_get_context_defaults(&st->codec);
             s->streams[s->nb_streams++] = st;
             st->id = get_be16(pb);
             get_be32(pb); /* max bit rate */
@@ -482,6 +482,8 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
             st = av_mallocz(sizeof(AVStream));
             if (!st)
                 return -ENOMEM;
+            avcodec_get_context_defaults(&st->codec);
+
             if (v & 0x01)
                 st->codec.channels = 2;
             else
@@ -458,7 +458,7 @@ int av_find_stream_info(AVFormatContext *ic)
     AVCodec *codec;
     AVStream *st;
     AVPacket *pkt;
-    AVPicture picture;
+    AVVideoFrame picture;
     AVPacketList *pktl=NULL, **ppktl;
     short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
     UINT8 *ptr;
@@ -694,6 +694,8 @@ AVStream *av_new_stream(AVFormatContext *s, int id)
     st = av_mallocz(sizeof(AVStream));
     if (!st)
         return NULL;
+    avcodec_get_context_defaults(&st->codec);
+
     st->index = s->nb_streams;
     st->id = id;
     s->streams[s->nb_streams++] = st;
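
The av_mallocz() + avcodec_get_context_defaults() pattern above recurs in every demuxer that hand-allocates an AVStream; skipping the defaults call would leave get_buffer/release_buffer NULL and break the first decode. Demuxers that can use the updated av_new_stream() get the initialisation for free:

    AVStream *st = av_new_stream(s, 0 /* id */);
    if (!st)
        return -ENOMEM;
    /* st->codec now carries defaults, including the buffer callbacks */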