(and fix) stream copying. For now, force_pts is only honoured by the MPEG muxer. ASF could honour this also, but it should be fixed to use Tickers first. - MPEG audio decoder exports its frame size in bytes. - Hopefully this fixes the floating point exception found in ffserver. Originally committed as revision 382 to svn://svn.ffmpeg.org/ffmpeg/trunk (tags/v0.5)
@@ -274,7 +274,7 @@ static void do_audio_out(AVFormatContext *s, | |||||
&ost->fifo.rptr) == 0) { | &ost->fifo.rptr) == 0) { | ||||
ret = avcodec_encode_audio(enc, audio_out, sizeof(audio_out), | ret = avcodec_encode_audio(enc, audio_out, sizeof(audio_out), | ||||
(short *)audio_buf); | (short *)audio_buf); | ||||
s->format->write_packet(s, ost->index, audio_out, ret); | |||||
s->format->write_packet(s, ost->index, audio_out, ret, 0); | |||||
} | } | ||||
} else { | } else { | ||||
/* output a pcm frame */ | /* output a pcm frame */ | ||||
@@ -291,7 +291,7 @@ static void do_audio_out(AVFormatContext *s, | |||||
} | } | ||||
ret = avcodec_encode_audio(enc, audio_out, size_out, | ret = avcodec_encode_audio(enc, audio_out, size_out, | ||||
(short *)buftmp); | (short *)buftmp); | ||||
s->format->write_packet(s, ost->index, audio_out, ret); | |||||
s->format->write_packet(s, ost->index, audio_out, ret, 0); | |||||
} | } | ||||
} | } | ||||
@@ -387,7 +387,7 @@ static void write_picture(AVFormatContext *s, int index, AVPicture *picture, | |||||
default: | default: | ||||
return; | return; | ||||
} | } | ||||
s->format->write_packet(s, index, buf, size); | |||||
s->format->write_packet(s, index, buf, size, 0); | |||||
free(buf); | free(buf); | ||||
} | } | ||||
@@ -484,7 +484,7 @@ static void do_video_out(AVFormatContext *s, | |||||
ret = avcodec_encode_video(enc, | ret = avcodec_encode_video(enc, | ||||
video_buffer, sizeof(video_buffer), | video_buffer, sizeof(video_buffer), | ||||
picture); | picture); | ||||
s->format->write_packet(s, ost->index, video_buffer, ret); | |||||
s->format->write_packet(s, ost->index, video_buffer, ret, 0); | |||||
*frame_size = ret; | *frame_size = ret; | ||||
} else { | } else { | ||||
write_picture(s, ost->index, picture, enc->pix_fmt, enc->width, enc->height); | write_picture(s, ost->index, picture, enc->pix_fmt, enc->width, enc->height); | ||||
@@ -728,6 +728,11 @@ static int av_encode(AVFormatContext **output_files, | |||||
codec->sample_rate == icodec->sample_rate && | codec->sample_rate == icodec->sample_rate && | ||||
codec->channels == icodec->channels) { | codec->channels == icodec->channels) { | ||||
/* no reencoding */ | /* no reencoding */ | ||||
/* use the same frame size */ | |||||
codec->frame_size = icodec->frame_size; | |||||
//codec->frame_size = 8*icodec->sample_rate*icodec->frame_size/ | |||||
// icodec->bit_rate; | |||||
//fprintf(stderr,"\nFrame size: %d", codec->frame_size); | |||||
} else { | } else { | ||||
if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE)) | if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE)) | ||||
goto fail; | goto fail; | ||||
@@ -999,7 +1004,8 @@ static int av_encode(AVFormatContext **output_files, | |||||
} | } | ||||
} else { | } else { | ||||
/* no reencoding needed : output the packet directly */ | /* no reencoding needed : output the packet directly */ | ||||
os->format->write_packet(os, ost->index, data_buf, data_size); | |||||
/* force the input stream PTS */ | |||||
os->format->write_packet(os, ost->index, data_buf, data_size, pkt.pts); | |||||
} | } | ||||
} | } | ||||
} | } | ||||
@@ -882,7 +882,7 @@ static int http_prepare_data(HTTPContext *c) | |||||
} | } | ||||
} else { | } else { | ||||
send_it: | send_it: | ||||
if (av_write_packet(&c->fmt_ctx, &pkt)) | |||||
if (av_write_packet(&c->fmt_ctx, &pkt, 0)) | |||||
c->state = HTTPSTATE_SEND_DATA_TRAILER; | c->state = HTTPSTATE_SEND_DATA_TRAILER; | ||||
} | } | ||||
@@ -524,7 +524,7 @@ static void put_frame(AVFormatContext *s, ASFStream *stream, int timestamp, | |||||
static int asf_write_packet(AVFormatContext *s, int stream_index, | static int asf_write_packet(AVFormatContext *s, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
ASFContext *asf = s->priv_data; | ASFContext *asf = s->priv_data; | ||||
int timestamp; | int timestamp; | ||||
@@ -161,7 +161,7 @@ static int audio_write_header(AVFormatContext *s1) | |||||
} | } | ||||
static int audio_write_packet(AVFormatContext *s1, int stream_index, | static int audio_write_packet(AVFormatContext *s1, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
AudioData *s = s1->priv_data; | AudioData *s = s1->priv_data; | ||||
int len, ret; | int len, ret; | ||||
@@ -44,7 +44,7 @@ typedef struct AVFormat { | |||||
int (*write_header)(struct AVFormatContext *); | int (*write_header)(struct AVFormatContext *); | ||||
int (*write_packet)(struct AVFormatContext *, | int (*write_packet)(struct AVFormatContext *, | ||||
int stream_index, | int stream_index, | ||||
unsigned char *buf, int size); | |||||
unsigned char *buf, int size, int force_pts); | |||||
int (*write_trailer)(struct AVFormatContext *); | int (*write_trailer)(struct AVFormatContext *); | ||||
/* optional input support */ | /* optional input support */ | ||||
@@ -187,7 +187,7 @@ AVFormatContext *av_open_input_file(const char *filename, | |||||
int av_read_packet(AVFormatContext *s, AVPacket *pkt); | int av_read_packet(AVFormatContext *s, AVPacket *pkt); | ||||
void av_close_input_file(AVFormatContext *s); | void av_close_input_file(AVFormatContext *s); | ||||
int av_write_packet(AVFormatContext *s, AVPacket *pkt); | |||||
int av_write_packet(AVFormatContext *s, AVPacket *pkt, int force_pts); | |||||
void dump_format(AVFormatContext *ic, | void dump_format(AVFormatContext *ic, | ||||
int index, | int index, | ||||
@@ -277,7 +277,7 @@ static int avi_write_header(AVFormatContext *s) | |||||
} | } | ||||
static int avi_write_packet(AVFormatContext *s, int stream_index, | static int avi_write_packet(AVFormatContext *s, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
AVIContext *avi = s->priv_data; | AVIContext *avi = s->priv_data; | ||||
ByteIOContext *pb = &s->pb; | ByteIOContext *pb = &s->pb; | ||||
@@ -195,7 +195,7 @@ static int ffm_write_header(AVFormatContext *s) | |||||
} | } | ||||
static int ffm_write_packet(AVFormatContext *s, int stream_index, | static int ffm_write_packet(AVFormatContext *s, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
AVStream *st = s->streams[stream_index]; | AVStream *st = s->streams[stream_index]; | ||||
FFMStream *fst = st->priv_data; | FFMStream *fst = st->priv_data; | ||||
@@ -515,7 +515,7 @@ static int img_write_header(AVFormatContext *s) | |||||
} | } | ||||
static int img_write_packet(AVFormatContext *s, int stream_index, | static int img_write_packet(AVFormatContext *s, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
VideoData *img = s->priv_data; | VideoData *img = s->priv_data; | ||||
AVStream *st = s->streams[stream_index]; | AVStream *st = s->streams[stream_index]; | ||||
@@ -32,8 +32,8 @@ static int mpjpeg_write_header(AVFormatContext *s) | |||||
return 0; | return 0; | ||||
} | } | ||||
static int mpjpeg_write_packet(AVFormatContext *s, | |||||
int stream_index, UINT8 *buf, int size) | |||||
static int mpjpeg_write_packet(AVFormatContext *s, int stream_index, | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
UINT8 buf1[256]; | UINT8 buf1[256]; | ||||
@@ -74,7 +74,7 @@ static int single_jpeg_write_header(AVFormatContext *s) | |||||
} | } | ||||
static int single_jpeg_write_packet(AVFormatContext *s, int stream_index, | static int single_jpeg_write_packet(AVFormatContext *s, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
put_buffer(&s->pb, buf, size); | put_buffer(&s->pb, buf, size); | ||||
put_flush_packet(&s->pb); | put_flush_packet(&s->pb); | ||||
@@ -120,7 +120,7 @@ static int jpeg_write_header(AVFormatContext *s1) | |||||
} | } | ||||
static int jpeg_write_packet(AVFormatContext *s1, int stream_index, | static int jpeg_write_packet(AVFormatContext *s1, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
JpegContext *s = s1->priv_data; | JpegContext *s = s1->priv_data; | ||||
char filename[1024]; | char filename[1024]; | ||||
@@ -321,18 +321,21 @@ static void flush_packet(AVFormatContext *ctx, int stream_index) | |||||
stream->start_pts = -1; | stream->start_pts = -1; | ||||
} | } | ||||
static int mpeg_mux_write_packet(AVFormatContext *ctx, | |||||
int stream_index, UINT8 *buf, int size) | |||||
static int mpeg_mux_write_packet(AVFormatContext *ctx, int stream_index, | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
MpegMuxContext *s = ctx->priv_data; | MpegMuxContext *s = ctx->priv_data; | ||||
AVStream *st = ctx->streams[stream_index]; | AVStream *st = ctx->streams[stream_index]; | ||||
StreamInfo *stream = st->priv_data; | StreamInfo *stream = st->priv_data; | ||||
int len; | int len; | ||||
while (size > 0) { | while (size > 0) { | ||||
/* set pts */ | /* set pts */ | ||||
if (stream->start_pts == -1) | |||||
if (stream->start_pts == -1) { | |||||
if (force_pts) | |||||
stream->pts = force_pts; | |||||
stream->start_pts = stream->pts; | stream->start_pts = stream->pts; | ||||
} | |||||
len = s->packet_data_max_size - stream->buffer_ptr; | len = s->packet_data_max_size - stream->buffer_ptr; | ||||
if (len > size) | if (len > size) | ||||
len = size; | len = size; | ||||
@@ -714,6 +717,8 @@ static int mpeg_mux_read_packet(AVFormatContext *s, | |||||
goto redo; | goto redo; | ||||
found: | found: | ||||
av_new_packet(pkt, len); | av_new_packet(pkt, len); | ||||
//printf("\nRead Packet ID: %x PTS: %f Size: %d", startcode, | |||||
// (float)pts/90000, len); | |||||
get_buffer(&s->pb, pkt->data, pkt->size); | get_buffer(&s->pb, pkt->data, pkt->size); | ||||
pkt->pts = pts; | pkt->pts = pts; | ||||
pkt->stream_index = i; | pkt->stream_index = i; | ||||
@@ -26,7 +26,7 @@ int raw_write_header(struct AVFormatContext *s) | |||||
int raw_write_packet(struct AVFormatContext *s, | int raw_write_packet(struct AVFormatContext *s, | ||||
int stream_index, | int stream_index, | ||||
unsigned char *buf, int size) | |||||
unsigned char *buf, int size, int force_pts) | |||||
{ | { | ||||
put_buffer(&s->pb, buf, size); | put_buffer(&s->pb, buf, size); | ||||
put_flush_packet(&s->pb); | put_flush_packet(&s->pb); | ||||
@@ -389,7 +389,7 @@ static int rm_write_video(AVFormatContext *s, UINT8 *buf, int size) | |||||
} | } | ||||
static int rm_write_packet(AVFormatContext *s, int stream_index, | static int rm_write_packet(AVFormatContext *s, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
if (s->streams[stream_index]->codec.codec_type == | if (s->streams[stream_index]->codec.codec_type == | ||||
CODEC_TYPE_AUDIO) | CODEC_TYPE_AUDIO) | ||||
@@ -378,7 +378,7 @@ static int swf_write_audio(AVFormatContext *s, UINT8 *buf, int size) | |||||
} | } | ||||
static int swf_write_packet(AVFormatContext *s, int stream_index, | static int swf_write_packet(AVFormatContext *s, int stream_index, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
AVCodecContext *codec = &s->streams[stream_index]->codec; | AVCodecContext *codec = &s->streams[stream_index]->codec; | ||||
if (codec->codec_type == CODEC_TYPE_AUDIO) | if (codec->codec_type == CODEC_TYPE_AUDIO) | ||||
@@ -375,10 +375,10 @@ void av_close_input_file(AVFormatContext *s) | |||||
} | } | ||||
int av_write_packet(AVFormatContext *s, AVPacket *pkt) | |||||
int av_write_packet(AVFormatContext *s, AVPacket *pkt, int force_pts) | |||||
{ | { | ||||
/* XXX: currently, an emulation because internal API must change */ | /* XXX: currently, an emulation because internal API must change */ | ||||
return s->format->write_packet(s, pkt->stream_index, pkt->data, pkt->size); | |||||
return s->format->write_packet(s, pkt->stream_index, pkt->data, pkt->size, force_pts); | |||||
} | } | ||||
/* "user interface" functions */ | /* "user interface" functions */ | ||||
@@ -127,7 +127,7 @@ static int wav_write_header(AVFormatContext *s) | |||||
} | } | ||||
static int wav_write_packet(AVFormatContext *s, int stream_index_ptr, | static int wav_write_packet(AVFormatContext *s, int stream_index_ptr, | ||||
UINT8 *buf, int size) | |||||
UINT8 *buf, int size, int force_pts) | |||||
{ | { | ||||
ByteIOContext *pb = &s->pb; | ByteIOContext *pb = &s->pb; | ||||
put_buffer(pb, buf, size); | put_buffer(pb, buf, size); | ||||
@@ -1403,6 +1403,7 @@ static int mpeg_decode_slice(AVCodecContext *avctx, | |||||
for(;;) { | for(;;) { | ||||
clear_blocks(s->block[0]); | clear_blocks(s->block[0]); | ||||
emms_c(); | |||||
ret = mpeg_decode_mb(s, s->block); | ret = mpeg_decode_mb(s, s->block); | ||||
dprintf("ret=%d\n", ret); | dprintf("ret=%d\n", ret); | ||||
if (ret < 0) | if (ret < 0) | ||||
@@ -2314,6 +2314,7 @@ static int decode_frame(AVCodecContext * avctx, | |||||
avctx->sample_rate = s->sample_rate; | avctx->sample_rate = s->sample_rate; | ||||
avctx->channels = s->nb_channels; | avctx->channels = s->nb_channels; | ||||
avctx->bit_rate = s->bit_rate; | avctx->bit_rate = s->bit_rate; | ||||
avctx->frame_size = s->frame_size; | |||||
} | } | ||||
} | } | ||||
} | } | ||||