Merge remote-tracking branch 'qatar/master'

* qatar/master:
  h264: drop ff_h264_ prefix from static function ff_h264_decode_rbsp_trailing()
  h264: Make ff_h264_decode_end() static, it is not used externally.
  output-example: K&R formatting cosmetics, comment spelling fixes
  avf: make the example output the proper message
  avf: fix audio writing in the output-example
  mov: don't overwrite existing indexes.
  lzw: fix potential integer overflow.
  truemotion: forbid invalid VLC bitsizes and token values.
  truemotion2: handle out-of-frame motion vectors through edge extension.
  configure: Check for a different SDL function

Conflicts:
	configure
	doc/examples/muxing.c
	libavcodec/truemotion2.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
--- a/configure
+++ b/configure
@@ -3219,7 +3219,7 @@ if enabled libdc1394; then
 fi
 
 SDL_CONFIG="${cross_prefix}sdl-config"
-if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
+if check_pkg_config sdl SDL_events.h SDL_PollEvent; then
     check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
     enable sdl &&
     check_struct SDL.h SDL_VideoInfo current_w $sdl_cflags && enable sdl_video_size
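
Why the probe changed (our reading; the commit message only says "Check for a different SDL function"): SDL_Linked_Version is not exported by every SDL 1.2 build, while SDL_PollEvent is a core symbol that any usable SDL provides. check_pkg_config compiles and links a stub roughly like the following against the pkg-config flags; if it links, sdl is enabled (a sketch of the generated probe, not the literal configure output):

    /* Sketch of the feature test behind
     * "check_pkg_config sdl SDL_events.h SDL_PollEvent". */
    #include <SDL_events.h>
    long check_SDL_PollEvent(void) { return (long) SDL_PollEvent; }
    int main(void) { return 0; }
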
--- a/doc/examples/muxing.c
+++ b/doc/examples/muxing.c
@@ -43,7 +43,7 @@
 #define STREAM_DURATION   200.0
 #define STREAM_FRAME_RATE 25 /* 25 images/s */
 #define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
-#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
+#define STREAM_PIX_FMT    PIX_FMT_YUV420P /* default pix_fmt */
 
 static int sws_flags = SWS_BICUBIC;
@@ -80,10 +80,10 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
     c = st->codec;
 
     /* put sample parameters */
-    c->sample_fmt = AV_SAMPLE_FMT_S16;
-    c->bit_rate = 64000;
+    c->sample_fmt  = AV_SAMPLE_FMT_S16;
+    c->bit_rate    = 64000;
     c->sample_rate = 44100;
-    c->channels = 2;
+    c->channels    = 2;
 
     // some formats want stream headers to be separate
     if (oc->oformat->flags & AVFMT_GLOBALHEADER)
@@ -105,7 +105,7 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
     }
 
     /* init signal generator */
-    t = 0;
+    t     = 0;
     tincr = 2 * M_PI * 110.0 / c->sample_rate;
     /* increment frequency by 110 Hz per second */
     tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
@@ -114,12 +114,13 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
         audio_input_frame_size = 10000;
     else
         audio_input_frame_size = c->frame_size;
-    samples = av_malloc(audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
-                        * c->channels);
+    samples = av_malloc(audio_input_frame_size *
+                        av_get_bytes_per_sample(c->sample_fmt) *
+                        c->channels);
 }
 
-/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
-   'nb_channels' channels */
+/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
+ * 'nb_channels' channels. */
 static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
 {
     int j, i, v;
@@ -128,9 +129,9 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
     q = samples;
     for (j = 0; j < frame_size; j++) {
         v = (int)(sin(t) * 10000);
-        for(i = 0; i < nb_channels; i++)
+        for (i = 0; i < nb_channels; i++)
             *q++ = v;
-        t += tincr;
+        t     += tincr;
         tincr += tincr2;
     }
 }
@@ -138,7 +139,7 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
 static void write_audio_frame(AVFormatContext *oc, AVStream *st)
 {
     AVCodecContext *c;
-    AVPacket pkt;
+    AVPacket pkt = { 0 }; // data and size must be 0;
     AVFrame *frame = avcodec_alloc_frame();
     int got_packet;
@@ -147,17 +148,19 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
 
     get_audio_frame(samples, audio_input_frame_size, c->channels);
     frame->nb_samples = audio_input_frame_size;
-    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, (uint8_t *)samples,
-                             audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
-                             * c->channels, 1);
+    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
+                             (uint8_t *)samples,
+                             audio_input_frame_size *
+                             av_get_bytes_per_sample(c->sample_fmt) *
+                             c->channels, 1);
 
     avcodec_encode_audio2(c, &pkt, frame, &got_packet);
     if (!got_packet)
         return;
 
-    pkt.stream_index= st->index;
+    pkt.stream_index = st->index;
 
-    /* write the compressed frame in the media file */
+    /* Write the compressed frame to the media file. */
     if (av_interleaved_write_frame(oc, &pkt) != 0) {
         fprintf(stderr, "Error while writing audio frame\n");
         exit(1);
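
The "AVPacket pkt = { 0 };" line above is the actual audio-writing fix: avcodec_encode_audio2() inspects pkt.data and pkt.size on entry and allocates the output buffer itself only when both are zero; with an uninitialized packet it would treat stack garbage as a caller-provided buffer. A minimal sketch of the calling contract, using the names from the diff:

    AVPacket pkt = { 0 };  /* data = NULL, size = 0: encoder allocates */
    int got_packet = 0;

    avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (got_packet)
        av_interleaved_write_frame(oc, &pkt);  /* takes ownership of pkt */
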
@@ -178,7 +181,7 @@ static AVFrame *picture, *tmp_picture;
 static uint8_t *video_outbuf;
 static int frame_count, video_outbuf_size;
 
-/* add a video output stream */
+/* Add a video output stream. */
 static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
 {
     AVCodecContext *c;
@@ -210,30 +213,30 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
 
     c->codec_id = codec_id;
 
-    /* put sample parameters */
+    /* Put sample parameters. */
     c->bit_rate = 400000;
-    /* resolution must be a multiple of two */
-    c->width = 352;
-    c->height = 288;
-    /* time base: this is the fundamental unit of time (in seconds) in terms
-       of which frame timestamps are represented. for fixed-fps content,
-       timebase should be 1/framerate and timestamp increments should be
-       identically 1. */
+    /* Resolution must be a multiple of two. */
+    c->width    = 352;
+    c->height   = 288;
+    /* timebase: This is the fundamental unit of time (in seconds) in terms
+     * of which frame timestamps are represented. For fixed-fps content,
+     * timebase should be 1/framerate and timestamp increments should be
+     * identical to 1. */
     c->time_base.den = STREAM_FRAME_RATE;
     c->time_base.num = 1;
-    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
-    c->pix_fmt = STREAM_PIX_FMT;
+    c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
+    c->pix_fmt       = STREAM_PIX_FMT;
     if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
         /* just for testing, we also add B frames */
         c->max_b_frames = 2;
     }
-    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
+    if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
         /* Needed to avoid using macroblocks in which some coeffs overflow.
-           This does not happen with normal video, it just happens here as
-           the motion of the chroma plane does not match the luma plane. */
-        c->mb_decision=2;
+         * This does not happen with normal video, it just happens here as
+         * the motion of the chroma plane does not match the luma plane. */
+        c->mb_decision = 2;
     }
-    // some formats want stream headers to be separate
+    /* Some formats want stream headers to be separate. */
     if (oc->oformat->flags & AVFMT_GLOBALHEADER)
         c->flags |= CODEC_FLAG_GLOBAL_HEADER;
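
A worked instance of the timebase comment (a sketch using the values above, not part of the diff): with STREAM_FRAME_RATE = 25 the stream time base is 1/25 s, so one tick is exactly one frame and fixed-fps timestamps increment by 1.

    c->time_base = (AVRational){ 1, 25 };  /* num = 1, den = STREAM_FRAME_RATE */
    /* frame 0 -> pts 0 (0.00 s), frame 1 -> pts 1 (0.04 s),
     * frame 25 -> pts 25 (1.00 s) */
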
@@ -249,7 +252,7 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
     picture = avcodec_alloc_frame();
     if (!picture)
         return NULL;
-    size = avpicture_get_size(pix_fmt, width, height);
+    size        = avpicture_get_size(pix_fmt, width, height);
     picture_buf = av_malloc(size);
     if (!picture_buf) {
         av_free(picture);
@@ -274,26 +277,26 @@ static void open_video(AVFormatContext *oc, AVStream *st)
 
     video_outbuf = NULL;
     if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
-        /* allocate output buffer */
-        /* XXX: API change will be done */
-        /* buffers passed into lav* can be allocated any way you prefer,
-           as long as they're aligned enough for the architecture, and
-           they're freed appropriately (such as using av_free for buffers
-           allocated with av_malloc) */
+        /* Allocate output buffer. */
+        /* XXX: API change will be done. */
+        /* Buffers passed into lav* can be allocated any way you prefer,
+         * as long as they're aligned enough for the architecture, and
+         * they're freed appropriately (such as using av_free for buffers
+         * allocated with av_malloc). */
         video_outbuf_size = 200000;
-        video_outbuf = av_malloc(video_outbuf_size);
+        video_outbuf      = av_malloc(video_outbuf_size);
     }
 
-    /* allocate the encoded raw picture */
+    /* Allocate the encoded raw picture. */
     picture = alloc_picture(c->pix_fmt, c->width, c->height);
     if (!picture) {
         fprintf(stderr, "Could not allocate picture\n");
         exit(1);
     }
 
-    /* if the output format is not YUV420P, then a temporary YUV420P
-       picture is needed too. It is then converted to the required
-       output format */
+    /* If the output format is not YUV420P, then a temporary YUV420P
+     * picture is needed too. It is then converted to the required
+     * output format. */
     tmp_picture = NULL;
     if (c->pix_fmt != PIX_FMT_YUV420P) {
         tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
@@ -304,23 +307,22 @@ static void open_video(AVFormatContext *oc, AVStream *st)
     }
 }
 
-/* prepare a dummy image */
-static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
+/* Prepare a dummy image. */
+static void fill_yuv_image(AVFrame *pict, int frame_index,
+                           int width, int height)
 {
     int x, y, i;
 
     i = frame_index;
 
     /* Y */
-    for (y = 0; y < height; y++) {
-        for (x = 0; x < width; x++) {
+    for (y = 0; y < height; y++)
+        for (x = 0; x < width; x++)
             pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
-        }
-    }
 
     /* Cb and Cr */
-    for (y = 0; y < height/2; y++) {
-        for (x = 0; x < width/2; x++) {
+    for (y = 0; y < height / 2; y++) {
+        for (x = 0; x < width / 2; x++) {
             pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
             pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
         }
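
For context on the new "/ 2" loop bounds (our note, not from the diff): YUV420P subsamples chroma by two both horizontally and vertically, so the Cb and Cr planes each hold a quarter of the luma sample count, which is why the chroma loops cover only width / 2 by height / 2.

    int luma_samples   = width * height;             /* pict->data[0]            */
    int chroma_samples = (width / 2) * (height / 2); /* each of data[1], data[2] */
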
@@ -336,13 +338,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
     c = st->codec;
 
     if (frame_count >= STREAM_NB_FRAMES) {
-        /* no more frame to compress. The codec has a latency of a few
-           frames if using B frames, so we get the last frames by
-           passing the same picture again */
+        /* No more frames to compress. The codec has a latency of a few
+         * frames if using B-frames, so we get the last frames by
+         * passing the same picture again. */
     } else {
         if (c->pix_fmt != PIX_FMT_YUV420P) {
             /* as we only generate a YUV420P picture, we must convert it
-               to the codec pixel format if needed */
+             * to the codec pixel format if needed */
             if (img_convert_ctx == NULL) {
                 img_convert_ctx = sws_getContext(c->width, c->height,
                                                  PIX_FMT_YUV420P,
@@ -350,7 +352,8 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
                                                  c->pix_fmt,
                                                  sws_flags, NULL, NULL, NULL);
                 if (img_convert_ctx == NULL) {
-                    fprintf(stderr, "Cannot initialize the conversion context\n");
+                    fprintf(stderr,
+                            "Cannot initialize the conversion context\n");
                     exit(1);
                 }
             }
@@ -362,36 +365,38 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
         }
     }
 
     if (oc->oformat->flags & AVFMT_RAWPICTURE) {
-        /* raw video case. The API will change slightly in the near
-           future for that. */
+        /* Raw video case - the API will change slightly in the near
+         * future for that. */
         AVPacket pkt;
         av_init_packet(&pkt);
 
-        pkt.flags |= AV_PKT_FLAG_KEY;
-        pkt.stream_index = st->index;
-        pkt.data = (uint8_t *)picture;
-        pkt.size = sizeof(AVPicture);
+        pkt.flags        |= AV_PKT_FLAG_KEY;
+        pkt.stream_index  = st->index;
+        pkt.data          = (uint8_t *)picture;
+        pkt.size          = sizeof(AVPicture);
 
         ret = av_interleaved_write_frame(oc, &pkt);
     } else {
         /* encode the image */
-        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
-        /* if zero size, it means the image was buffered */
+        out_size = avcodec_encode_video(c, video_outbuf,
+                                        video_outbuf_size, picture);
+        /* If size is zero, it means the image was buffered. */
        if (out_size > 0) {
             AVPacket pkt;
             av_init_packet(&pkt);
 
             if (c->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
-            if(c->coded_frame->key_frame)
+                pkt.pts = av_rescale_q(c->coded_frame->pts,
+                                       c->time_base, st->time_base);
+            if (c->coded_frame->key_frame)
                 pkt.flags |= AV_PKT_FLAG_KEY;
 
             pkt.stream_index = st->index;
-            pkt.data = video_outbuf;
-            pkt.size = out_size;
+            pkt.data         = video_outbuf;
+            pkt.size         = out_size;
 
-            /* write the compressed frame in the media file */
+            /* Write the compressed frame to the media file. */
             ret = av_interleaved_write_frame(oc, &pkt);
         } else {
             ret = 0;
@@ -428,7 +433,7 @@ int main(int argc, char **argv)
     double audio_pts, video_pts;
     int i;
 
-    /* initialize libavcodec, and register all codecs and formats */
+    /* Initialize libavcodec, and register all codecs and formats. */
     av_register_all();
 
     if (argc != 2) {
@@ -453,8 +458,8 @@ int main(int argc, char **argv)
     }
     fmt = oc->oformat;
 
-    /* add the audio and video streams using the default format codecs
-       and initialize the codecs */
+    /* Add the audio and video streams using the default format codecs
+     * and initialize the codecs. */
     video_st = NULL;
     audio_st = NULL;
     if (fmt->video_codec != CODEC_ID_NONE) {
@@ -464,15 +469,15 @@ int main(int argc, char **argv)
         audio_st = add_audio_stream(oc, fmt->audio_codec);
     }
 
-    av_dump_format(oc, 0, filename, 1);
-
-    /* now that all the parameters are set, we can open the audio and
-       video codecs and allocate the necessary encode buffers */
+    /* Now that all the parameters are set, we can open the audio and
+     * video codecs and allocate the necessary encode buffers. */
     if (video_st)
         open_video(oc, video_st);
     if (audio_st)
         open_audio(oc, audio_st);
 
+    av_dump_format(oc, 0, filename, 1);
+
     /* open the output file, if needed */
     if (!(fmt->flags & AVFMT_NOFILE)) {
         if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
@@ -481,18 +486,20 @@ int main(int argc, char **argv)
         }
     }
 
-    /* write the stream header, if any */
+    /* Write the stream header, if any. */
     avformat_write_header(oc, NULL);
 
     picture->pts = 0;
-    for(;;) {
-        /* compute current audio and video time */
+    for (;;) {
+        /* Compute current audio and video time. */
         if (audio_st)
             audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
         else
             audio_pts = 0.0;
 
         if (video_st)
-            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
+            video_pts = (double)video_st->pts.val * video_st->time_base.num /
+                        video_st->time_base.den;
         else
             video_pts = 0.0;
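
Equivalent formulation (sketch): pts.val counts ticks of the stream time base, so seconds = pts.val * num / den; av_q2d() packages the same division.

    /* e.g. time_base = {1, 44100}, pts.val = 88200  ->  2.0 seconds */
    double pts_seconds = audio_st->pts.val * av_q2d(audio_st->time_base);
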
@@ -509,28 +516,27 @@ int main(int argc, char **argv)
         }
     }
 
-    /* write the trailer, if any. the trailer must be written
-     * before you close the CodecContexts open when you wrote the
-     * header; otherwise write_trailer may try to use memory that
-     * was freed on av_codec_close() */
+    /* Write the trailer, if any. The trailer must be written before you
+     * close the CodecContexts open when you wrote the header; otherwise
+     * av_write_trailer() may try to use memory that was freed on
+     * av_codec_close(). */
     av_write_trailer(oc);
 
-    /* close each codec */
+    /* Close each codec. */
     if (video_st)
         close_video(oc, video_st);
     if (audio_st)
         close_audio(oc, audio_st);
 
-    /* free the streams */
-    for(i = 0; i < oc->nb_streams; i++) {
+    /* Free the streams. */
+    for (i = 0; i < oc->nb_streams; i++) {
         av_freep(&oc->streams[i]->codec);
         av_freep(&oc->streams[i]);
     }
 
-    if (!(fmt->flags & AVFMT_NOFILE)) {
-        /* close the output file */
+    if (!(fmt->flags & AVFMT_NOFILE))
+        /* Close the output file. */
         avio_close(oc->pb);
-    }
 
     /* free the stream */
     av_free(oc);
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -271,7 +271,7 @@ nsc:
  * Identify the exact end of the bitstream
  * @return the length of the trailing, or 0 if damaged
  */
-static int ff_h264_decode_rbsp_trailing(H264Context *h, const uint8_t *src)
+static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
 {
     int v = *src;
     int r;
@@ -4260,7 +4260,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
                 dst_length--;
             bit_length = !dst_length ? 0
                                      : (8 * dst_length -
-                                        ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
+                                        decode_rbsp_trailing(h, ptr + dst_length - 1));
 
             if (s->avctx->debug & FF_DEBUG_STARTCODE)
                 av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d pass %d\n", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length, pass);
@@ -4606,7 +4606,7 @@ av_cold void ff_h264_free_context(H264Context *h)
         av_freep(h->pps_buffers + i);
 }
 
-av_cold int ff_h264_decode_end(AVCodecContext *avctx)
+static av_cold int h264_decode_end(AVCodecContext *avctx)
 {
     H264Context *h = avctx->priv_data;
     MpegEncContext *s = &h->s;
@@ -4664,7 +4664,7 @@ AVCodec ff_h264_decoder = {
     .id                    = CODEC_ID_H264,
     .priv_data_size        = sizeof(H264Context),
     .init                  = ff_h264_decode_init,
-    .close                 = ff_h264_decode_end,
+    .close                 = h264_decode_end,
     .decode                = decode_frame,
     .capabilities          = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
                              CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS |
@@ -4684,7 +4684,7 @@ AVCodec ff_h264_vdpau_decoder = {
     .id             = CODEC_ID_H264,
     .priv_data_size = sizeof(H264Context),
     .init           = ff_h264_decode_init,
-    .close          = ff_h264_decode_end,
+    .close          = h264_decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
     .flush          = flush_dpb,
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -678,7 +678,6 @@ void ff_h264_hl_decode_mb(H264Context *h);
 int ff_h264_frame_start(H264Context *h);
 int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size);
 av_cold int ff_h264_decode_init(AVCodecContext *avctx);
-av_cold int ff_h264_decode_end(AVCodecContext *avctx);
 av_cold void ff_h264_decode_init_vlc(void);
 
 /**
--- a/libavcodec/lzw.c
+++ b/libavcodec/lzw.c
@@ -102,7 +102,7 @@ void ff_lzw_decode_tail(LZWState *p)
 
     if(s->mode == FF_LZW_GIF) {
         while (s->bs > 0) {
-            if (s->pbuf + s->bs >= s->ebuf) {
+            if (s->bs >= s->ebuf - s->pbuf) {
                 s->pbuf = s->ebuf;
                 break;
             } else {
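
The point of the lzw rewrite: s->pbuf + s->bs can form a pointer past the end of the buffer, or wrap around entirely for a large bs, which is undefined behaviour and can make the comparison pass silently; s->ebuf - s->pbuf only measures the space actually remaining. A standalone sketch of the two forms (hypothetical helper names):

    #include <stdint.h>

    /* old form: computes pbuf + bs, possibly an out-of-range pointer,
     * before comparing */
    static int past_end_unsafe(const uint8_t *pbuf, int bs, const uint8_t *ebuf)
    {
        return pbuf + bs >= ebuf;
    }

    /* new form: never forms an invalid pointer; ebuf - pbuf is the
     * remaining room as a ptrdiff_t */
    static int past_end_safe(const uint8_t *pbuf, int bs, const uint8_t *ebuf)
    {
        return bs >= ebuf - pbuf;
    }
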
--- a/libavcodec/truemotion2.c
+++ b/libavcodec/truemotion2.c
@@ -60,7 +60,9 @@ typedef struct TM2Context{
     int *clast;
 
     /* data for current and previous frame */
+    int *Y1_base, *U1_base, *V1_base, *Y2_base, *U2_base, *V2_base;
     int *Y1, *U1, *V1, *Y2, *U2, *V2;
+    int y_stride, uv_stride;
     int cur;
 } TM2Context;
@@ -131,7 +133,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
 
     /* check for correct codes parameters */
     if((huff.val_bits < 1) || (huff.val_bits > 32) ||
-       (huff.max_bits < 0) || (huff.max_bits > 32)) {
+       (huff.max_bits < 0) || (huff.max_bits > 25)) {
         av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal length: %i, max code length: %i\n",
                huff.val_bits, huff.max_bits);
         return -1;
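
Why 25 rather than 32 (our reading; the diff itself only tightens the bound): downstream the decoder derives table sizes from expressions like 1 << huff.max_bits, which is undefined for a shift of 32 on a 32-bit int, and FFmpeg's bitstream reader delivers at most 25 bits per get_bits() call, so no legitimate code can be longer anyway. The resulting validation pattern:

    if ((huff.val_bits < 1) || (huff.val_bits > 32) ||   /* literal width   */
        (huff.max_bits < 0) || (huff.max_bits > 25))     /* max code length */
        return -1;  /* reject corrupt or crafted tree headers */
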
@@ -328,10 +330,21 @@ static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, i
                 return -1;
             }
             ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
+            if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
+                av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
+                       ctx->tokens[stream_id][i], stream_id, i);
+                return AVERROR_INVALIDDATA;
+            }
         }
     } else {
-        for(i = 0; i < toks; i++)
+        for(i = 0; i < toks; i++) {
             ctx->tokens[stream_id][i] = codes.recode[0];
+            if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
+                av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
+                       ctx->tokens[stream_id][i], stream_id, i);
+                return AVERROR_INVALIDDATA;
+            }
+        }
     }
 
     tm2_free_codes(&codes);
@@ -361,9 +374,9 @@ static inline int GET_TOK(TM2Context *ctx,int type) {
     int *Y, *U, *V;\
     int Ystride, Ustride, Vstride;\
 \
-    Ystride = ctx->avctx->width;\
-    Vstride = (ctx->avctx->width + 1) >> 1;\
-    Ustride = (ctx->avctx->width + 1) >> 1;\
+    Ystride = ctx->y_stride;\
+    Vstride = ctx->uv_stride;\
+    Ustride = ctx->uv_stride;\
     Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
     V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
     U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
@@ -651,6 +664,8 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b
     mx = GET_TOK(ctx, TM2_MOT);
     my = GET_TOK(ctx, TM2_MOT);
+    mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width  - bx * 4);
+    my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
 
     if (4*bx+mx<0 || 4*by+my<0 || 4*bx+mx+4 > ctx->avctx->width || 4*by+my+4 > ctx->avctx->height) {
         av_log(0,0, "MV out of picture\n");
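
What the clamp buys (sketch of the bounds): a 4x4 block at block coordinates (bx, by) starts at pixel (4*bx, 4*by), and after this merge every plane carries a 4-pixel replicated border (see the edge extension below), so clipping the vector into [-(4*bx + 4), width - 4*bx] keeps every read inside allocated memory even when the vector points outside the picture. For the top-left block it reduces to:

    /* bx = by = 0: the vector may reach 4 pixels beyond any picture edge */
    mx = av_clip(mx, -4, ctx->avctx->width);
    my = av_clip(my, -4, ctx->avctx->height);
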
@@ -696,15 +711,12 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b
 static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
 {
     int i, j;
-    int bw, bh;
+    int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
     int type;
     int keyframe = 1;
     int *Y, *U, *V;
     uint8_t *dst;
 
-    bw = ctx->avctx->width >> 2;
-    bh = ctx->avctx->height >> 2;
-
     for(i = 0; i < TM2_NUM_STREAMS; i++)
         ctx->tok_ptrs[i] = 0;
@@ -757,17 +769,54 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
     U = (ctx->cur?ctx->U2:ctx->U1);
     V = (ctx->cur?ctx->V2:ctx->V1);
     dst = p->data[0];
-    for(j = 0; j < ctx->avctx->height; j++){
-        for(i = 0; i < ctx->avctx->width; i++){
+    for(j = 0; j < h; j++){
+        for(i = 0; i < w; i++){
             int y = Y[i], u = U[i >> 1], v = V[i >> 1];
             dst[3*i+0] = av_clip_uint8(y + v);
             dst[3*i+1] = av_clip_uint8(y);
             dst[3*i+2] = av_clip_uint8(y + u);
         }
-        Y += ctx->avctx->width;
+
+        /* horizontal edge extension */
+        Y[-4]    = Y[-3]    = Y[-2]    = Y[-1] = Y[0];
+        Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w]  = Y[w - 1];
+
+        /* vertical edge extension */
+        if (j == 0) {
+            memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
+            memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
+            memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
+            memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
+        } else if (j == h - 1) {
+            memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
+            memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
+            memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
+            memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
+        }
+
+        Y += ctx->y_stride;
+
         if (j & 1) {
-            U += ctx->avctx->width >> 1;
-            V += ctx->avctx->width >> 1;
+            /* horizontal edge extension */
+            U[-2] = U[-1] = U[0];
+            V[-2] = V[-1] = V[0];
+            U[cw + 1] = U[cw] = U[cw - 1];
+            V[cw + 1] = V[cw] = V[cw - 1];
+
+            /* vertical edge extension */
+            if (j == 1) {
+                memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
+                memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
+                memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
+                memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
+            } else if (j == h - 1) {
+                memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
+                memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
+                memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
+                memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
+            }
+
+            U += ctx->uv_stride;
+            V += ctx->uv_stride;
         }
         dst += p->linesize[0];
     }
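
The layout this loop maintains (our sketch, matching the allocations in decode_init further down): each luma plane carries a 4-pixel border on all four sides and each chroma plane a 2-pixel border; the per-row stores replicate the first and last columns sideways, and the memcpy calls replicate the first and last rows outward.

    int w = 352, h = 288;                 /* example dimensions        */
    int y_stride = w + 8;                 /* 4 border columns per side */
    int *Y1_base = av_malloc(sizeof(*Y1_base) * y_stride * (h + 8));
    int *Y1      = Y1_base + y_stride * 4 + 4;  /* pixel (0,0)         */
    /* row j starts at Y1 + j * y_stride; Y1 - 4 is the same row
     * including its left border */
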
@@ -813,9 +862,10 @@ static int decode_frame(AVCodecContext *avctx,
             av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
             return AVERROR_INVALIDDATA;
         }
+
         t = tm2_read_stream(l, l->buffer + skip, tm2_stream_order[i], buf_size - skip);
-        if(t == -1){
-            return -1;
+        if(t < 0){
+            return t;
         }
         skip += t;
     }
@@ -834,7 +884,7 @@ static int decode_frame(AVCodecContext *avctx,
 
 static av_cold int decode_init(AVCodecContext *avctx){
     TM2Context * const l = avctx->priv_data;
-    int i;
+    int i, w = avctx->width, h = avctx->height;
 
     if((avctx->width & 3) || (avctx->height & 3)){
         av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
@@ -848,21 +898,46 @@ static av_cold int decode_init(AVCodecContext *avctx){
 
     ff_dsputil_init(&l->dsp, avctx);
 
-    l->last  = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
-    l->clast = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
+    l->last  = av_malloc(4 * sizeof(*l->last)  * (w >> 2));
+    l->clast = av_malloc(4 * sizeof(*l->clast) * (w >> 2));
 
     for(i = 0; i < TM2_NUM_STREAMS; i++) {
         l->tokens[i] = NULL;
         l->tok_lens[i] = 0;
     }
 
-    l->Y1 = av_malloc(sizeof(int) * avctx->width * avctx->height);
-    l->U1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
-    l->V1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
-    l->Y2 = av_malloc(sizeof(int) * avctx->width * avctx->height);
-    l->U2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
-    l->V2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
+    w += 8;
+    h += 8;
+    l->Y1_base = av_malloc(sizeof(*l->Y1_base) * w * h);
+    l->Y2_base = av_malloc(sizeof(*l->Y2_base) * w * h);
+    l->y_stride = w;
+    w = (w + 1) >> 1;
+    h = (h + 1) >> 1;
+    l->U1_base = av_malloc(sizeof(*l->U1_base) * w * h);
+    l->V1_base = av_malloc(sizeof(*l->V1_base) * w * h);
+    l->U2_base = av_malloc(sizeof(*l->U2_base) * w * h);
+    l->V2_base = av_malloc(sizeof(*l->V1_base) * w * h);
+    l->uv_stride = w;
     l->cur = 0;
+    if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
+        !l->V1_base || !l->U2_base || !l->V2_base ||
+        !l->last || !l->clast) {
+        av_freep(l->Y1_base);
+        av_freep(l->Y2_base);
+        av_freep(l->U1_base);
+        av_freep(l->U2_base);
+        av_freep(l->V1_base);
+        av_freep(l->V2_base);
+        av_freep(l->last);
+        av_freep(l->clast);
+        return AVERROR(ENOMEM);
+    }
+    l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
+    l->Y2 = l->Y2_base + l->y_stride * 4 + 4;
+    l->U1 = l->U1_base + l->uv_stride * 2 + 2;
+    l->U2 = l->U2_base + l->uv_stride * 2 + 2;
+    l->V1 = l->V1_base + l->uv_stride * 2 + 2;
+    l->V2 = l->V2_base + l->uv_stride * 2 + 2;
 
     return 0;
 }
@@ -877,12 +952,12 @@ static av_cold int decode_end(AVCodecContext *avctx){
     for(i = 0; i < TM2_NUM_STREAMS; i++)
         av_free(l->tokens[i]);
     if(l->Y1){
-        av_free(l->Y1);
-        av_free(l->U1);
-        av_free(l->V1);
-        av_free(l->Y2);
-        av_free(l->U2);
-        av_free(l->V2);
+        av_free(l->Y1_base);
+        av_free(l->U1_base);
+        av_free(l->V1_base);
+        av_free(l->Y2_base);
+        av_free(l->U2_base);
+        av_free(l->V2_base);
     }
     av_freep(&l->buffer);
     l->buffer_size = 0;
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -1803,6 +1803,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
     unsigned int stps_index = 0;
     unsigned int i, j;
     uint64_t stream_size = 0;
+    AVIndexEntry *mem;
 
     /* adjust first dts according to edit list */
     if ((sc->empty_duration || sc->start_time) && mov->time_scale > 0) {
@@ -1832,12 +1833,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
 
         if (!sc->sample_count || st->nb_index_entries)
             return;
-        if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries))
+        if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
             return;
-        st->index_entries = av_malloc(sc->sample_count*sizeof(*st->index_entries));
-        if (!st->index_entries)
+        mem = av_realloc(st->index_entries, (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries));
+        if (!mem)
             return;
-        st->index_entries_allocated_size = sc->sample_count*sizeof(*st->index_entries);
+        st->index_entries = mem;
+        st->index_entries_allocated_size = (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries);
 
         for (i = 0; i < sc->chunk_count; i++) {
             current_offset = sc->chunk_offsets[i];
} | } | ||||
av_dlog(mov->fc, "chunk count %d\n", total); | av_dlog(mov->fc, "chunk count %d\n", total); | ||||
if (total >= UINT_MAX / sizeof(*st->index_entries)) | |||||
if (total >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries) | |||||
return; | return; | ||||
st->index_entries = av_malloc(total*sizeof(*st->index_entries)); | |||||
if (!st->index_entries) | |||||
mem = av_realloc(st->index_entries, (st->nb_index_entries + total) * sizeof(*st->index_entries)); | |||||
if (!mem) | |||||
return; | return; | ||||
st->index_entries_allocated_size = total*sizeof(*st->index_entries); | |||||
st->index_entries = mem; | |||||
st->index_entries_allocated_size = (st->nb_index_entries + total) * sizeof(*st->index_entries); | |||||
// populate index | // populate index | ||||
for (i = 0; i < sc->chunk_count; i++) { | for (i = 0; i < sc->chunk_count; i++) { | ||||
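
Two things change in these mov hunks. av_realloc() grows the existing table instead of av_malloc() replacing it, so index entries built earlier are preserved (the "don't overwrite existing indexes" of the commit message). And the capacity check is rearranged so it cannot itself overflow: the requirement (nb_index_entries + count) * sizeof(entry) <= UINT_MAX is tested as count >= UINT_MAX / sizeof(entry) - nb_index_entries, dividing before anything can wrap. A worked instance (sketch; the 24-byte entry size is an assumption for a typical 64-bit build):

    /* sizeof(AVIndexEntry) == 24 assumed; with nb_index_entries = 1000,
     * UINT_MAX / 24 - 1000 = 178955970, so any request of 178955970 or
     * more samples is rejected before the multiplication can overflow */
    if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
        return;
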