* qatar/master:
  docs: use -bsf:[vas] instead of -[vas]bsf.
  mpegaudiodec: Prevent premature clipping of mp3 input buffer.
  lavf: move the packet keyframe setting code.
  oggenc: free comment header for all codecs
  lcl: error out if uncompressed input buffer is smaller than framesize.
  mjpeg: abort decoding if packet is too large.
  golomb: use HAVE_BITS_REMAINING() macro to prevent infloop on EOF.
  get_bits: add HAVE_BITS_REMAINING macro.
  lavf/output-example: use new audio encoding API correctly.
  lavf/output-example: more proper usage of the new API.
  tiff: Prevent overreads in the type_sizes array.
  tiff: Make the TIFF_LONG and TIFF_SHORT types unsigned.
  apetag: do not leak memory if avio_read() fails
  apetag: propagate errors.
  SBR DSP x86: implement SSE sbr_hf_g_filt
  SBR DSP x86: implement SSE sbr_sum_square_sse
  SBR DSP: use intptr_t for the ixh parameter.

Conflicts:
        doc/bitstream_filters.texi
        doc/examples/muxing.c
        doc/ffmpeg.texi
        libavcodec/golomb.h
        libavcodec/x86/Makefile
        libavformat/oggenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
@@ -71,7 +71,7 @@ stream (carrying the AVI1 header ID and lacking a DHT segment) to
 produce fully qualified JPEG images.
 @example
-ffmpeg -i mjpeg-movie.avi -c:v copy -vbsf mjpeg2jpeg frame_%d.jpg
+ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
 exiftran -i -9 frame*.jpg
 ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
 @end example
@@ -52,8 +52,6 @@ static int sws_flags = SWS_BICUBIC;
 static float t, tincr, tincr2;
 static int16_t *samples;
-static uint8_t *audio_outbuf;
-static int audio_outbuf_size;
 static int audio_input_frame_size;
 
 /*
@@ -63,8 +61,16 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
 {
     AVCodecContext *c;
     AVStream *st;
+    AVCodec *codec;
 
-    st = avformat_new_stream(oc, NULL);
+    /* find the audio encoder */
+    codec = avcodec_find_encoder(codec_id);
+    if (!codec) {
+        fprintf(stderr, "codec not found\n");
+        exit(1);
+    }
+
+    st = avformat_new_stream(oc, codec);
     if (!st) {
         fprintf(stderr, "Could not alloc stream\n");
         exit(1);
@@ -72,8 +78,6 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
     st->id = 1;
 
     c = st->codec;
-    c->codec_id = codec_id;
-    c->codec_type = AVMEDIA_TYPE_AUDIO;
 
     /* put sample parameters */
     c->sample_fmt = AV_SAMPLE_FMT_S16;
@@ -91,19 +95,11 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
 static void open_audio(AVFormatContext *oc, AVStream *st)
 {
     AVCodecContext *c;
-    AVCodec *codec;
 
     c = st->codec;
 
-    /* find the audio encoder */
-    codec = avcodec_find_encoder(c->codec_id);
-    if (!codec) {
-        fprintf(stderr, "codec not found\n");
-        exit(1);
-    }
-
     /* open it */
-    if (avcodec_open2(c, codec, NULL) < 0) {
+    if (avcodec_open2(c, NULL, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
@@ -114,27 +110,12 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
     /* increment frequency by 110 Hz per second */
     tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
 
-    audio_outbuf_size = 10000;
-    audio_outbuf = av_malloc(audio_outbuf_size);
-
-    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
-       support to compute the input frame size in samples */
-    if (c->frame_size <= 1) {
-        audio_input_frame_size = audio_outbuf_size / c->channels;
-        switch(st->codec->codec_id) {
-        case CODEC_ID_PCM_S16LE:
-        case CODEC_ID_PCM_S16BE:
-        case CODEC_ID_PCM_U16LE:
-        case CODEC_ID_PCM_U16BE:
-            audio_input_frame_size >>= 1;
-            break;
-        default:
-            break;
-        }
-    } else {
+    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
+        audio_input_frame_size = 10000;
+    else
         audio_input_frame_size = c->frame_size;
-    }
-    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
+    samples = av_malloc(audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
+                        * c->channels);
 }
 
 /* prepare a 16 bit dummy audio frame of 'frame_size' samples and
@@ -158,19 +139,23 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
 {
     AVCodecContext *c;
     AVPacket pkt;
-    av_init_packet(&pkt);
+    AVFrame *frame = avcodec_alloc_frame();
+    int got_packet;
+
+    av_init_packet(&pkt);
     c = st->codec;
 
     get_audio_frame(samples, audio_input_frame_size, c->channels);
+    frame->nb_samples = audio_input_frame_size;
+    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, (uint8_t *)samples,
+                             audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
+                             * c->channels, 1);
 
-    pkt.size = avcodec_encode_audio2(c, audio_outbuf, audio_outbuf_size, samples);
+    avcodec_encode_audio2(c, &pkt, frame, &got_packet);
+    if (!got_packet)
+        return;
 
-    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
-        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
-    pkt.flags |= AV_PKT_FLAG_KEY;
-    pkt.data = audio_outbuf;
-    pkt.stream_index= st->index;
+    pkt.stream_index = st->index;
 
     /* write the compressed frame in the media file */
     if (av_interleaved_write_frame(oc, &pkt) != 0) {
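
For reference, a minimal sketch of the avcodec_encode_audio2() call pattern used in the hunk above, but with the encoder's return value checked (the example ignores it). The helper name encode_and_write() and its parameter list are illustrative only and assume an already-opened AVCodecContext and a filled AVFrame; this is not part of the patch.

/* Illustrative sketch; assumes libavformat/avformat.h and libavcodec/avcodec.h
 * are included and that c was opened with avcodec_open2(). */
static void encode_and_write(AVFormatContext *oc, AVStream *st,
                             AVCodecContext *c, AVFrame *frame)
{
    AVPacket pkt;
    int got_packet, ret;

    av_init_packet(&pkt);
    pkt.data = NULL; /* let the encoder allocate the packet payload */
    pkt.size = 0;

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "audio encoding failed\n");
        exit(1);
    }
    if (!got_packet)
        return; /* encoder buffered the input; nothing to write yet */

    pkt.stream_index = st->index;
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}
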
@@ -184,7 +169,6 @@ static void close_audio(AVFormatContext *oc, AVStream *st)
     avcodec_close(st->codec);
 
     av_free(samples);
-    av_free(audio_outbuf);
 }
 
 /**************************************************************/
@@ -201,7 +185,14 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
     AVStream *st;
     AVCodec *codec;
 
-    st = avformat_new_stream(oc, NULL);
+    /* find the video encoder */
+    codec = avcodec_find_encoder(codec_id);
+    if (!codec) {
+        fprintf(stderr, "codec not found\n");
+        exit(1);
+    }
+
+    st = avformat_new_stream(oc, codec);
     if (!st) {
         fprintf(stderr, "Could not alloc stream\n");
         exit(1);
@@ -271,20 +262,12 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
 
 static void open_video(AVFormatContext *oc, AVStream *st)
 {
-    AVCodec *codec;
     AVCodecContext *c;
 
     c = st->codec;
 
-    /* find the video encoder */
-    codec = avcodec_find_encoder(c->codec_id);
-    if (!codec) {
-        fprintf(stderr, "codec not found\n");
-        exit(1);
-    }
-
     /* open the codec */
-    if (avcodec_open2(c, codec, NULL) < 0) {
+    if (avcodec_open2(c, NULL, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
@@ -1019,10 +1019,10 @@ Set bitstream filters for matching streams. @var{bistream_filters} is
 a comma-separated list of bitstream filters. Use the @code{-bsfs} option
 to get the list of bitstream filters.
 @example
-ffmpeg -i h264.mp4 -c:v copy -vbsf h264_mp4toannexb -an out.h264
+ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
 @end example
 @example
-ffmpeg -i file.mov -an -vn -sbsf mov2textsub -c:s copy -f rawvideo sub.txt
+ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
 @end example
 
 @item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{per-stream})
@@ -30,7 +30,7 @@ void ff_sbr_qmf_post_shuffle_neon(float W[32][2], const float *z);
 void ff_sbr_qmf_deint_neg_neon(float *v, const float *src);
 void ff_sbr_qmf_deint_bfly_neon(float *v, const float *src0, const float *src1);
 void ff_sbr_hf_g_filt_neon(float (*Y)[2], const float (*X_high)[40][2],
-                           const float *g_filt, int m_max, int ixh);
+                           const float *g_filt, int m_max, intptr_t ixh);
 void ff_sbr_hf_gen_neon(float (*X_high)[2], const float (*X_low)[2],
                         const float alpha0[2], const float alpha1[2],
                         float bw, int start, int end);
@@ -118,10 +118,23 @@ for examples see get_bits, show_bits, skip_bits, get_vlc
 # define MIN_CACHE_BITS 25
 #endif
 
+#if UNCHECKED_BITSTREAM_READER
 #define OPEN_READER(name, gb) \
     unsigned int name##_index = (gb)->index; \
     av_unused unsigned int name##_cache
 
+#define HAVE_BITS_REMAINING(name, gb) 1
+#else
+#define OPEN_READER(name, gb) \
+    unsigned int name##_index = (gb)->index; \
+    unsigned int av_unused name##_cache = 0; \
+    unsigned int av_unused name##_size_plus8 = \
+                (gb)->size_in_bits_plus8
+
+#define HAVE_BITS_REMAINING(name, gb) \
+    name##_index < name##_size_plus8
+#endif
+
 #define CLOSE_READER(name, gb) (gb)->index = name##_index
 
 #ifdef BITSTREAM_READER_LE
@@ -154,7 +167,7 @@ for examples see get_bits, show_bits, skip_bits, get_vlc
 # define SKIP_COUNTER(name, gb, num) name##_index += (num)
 #else
 # define SKIP_COUNTER(name, gb, num) \
-    name##_index = FFMIN((gb)->size_in_bits_plus8, name##_index + (num))
+    name##_index = FFMIN(name##_size_plus8, name##_index + (num))
 #endif
 
 #define SKIP_BITS(name, gb, num) do { \
@@ -135,7 +135,7 @@ static inline int svq3_get_ue_golomb(GetBitContext *gb){
             ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
             UPDATE_CACHE(re, gb);
             buf = GET_CACHE(re, gb);
-        } while(ret<0x8000000U);
+        } while (ret<0x8000000U && HAVE_BITS_REMAINING(re, gb));
 
         CLOSE_READER(re, gb);
         return ret - 1;
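
For context, the guard added above follows the idea sketched below. ToyReader and its helpers are hypothetical stand-ins written for illustration, not the GetBitContext API; the point is only that bounding the scan by the number of input bits makes a truncated, all-zero buffer terminate instead of looping forever.

#include <stdint.h>

/* Illustrative only: a unary/Golomb-style scan over an all-zero buffer never
 * sees a stop bit, so the loop must also test how many bits remain. */
typedef struct ToyReader {
    const uint8_t *buf;
    int index;        /* current bit position */
    int size_in_bits; /* total bits available */
} ToyReader;

static int toy_get_bit(ToyReader *r)
{
    int bit = (r->buf[r->index >> 3] >> (7 - (r->index & 7))) & 1;
    r->index++;
    return bit;
}

static int toy_count_leading_zeros(ToyReader *r)
{
    int n = 0;
    /* the bounds test plays the role of HAVE_BITS_REMAINING(re, gb) */
    while (r->index < r->size_in_bits && !toy_get_bit(r))
        n++;
    return n;
}
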
@@ -229,8 +229,29 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
                 len = mszh_dlen;
             }
             break;
-        case COMP_MSZH_NOCOMP:
+        case COMP_MSZH_NOCOMP: {
+            int bppx2;
+            switch (c->imgtype) {
+            case IMGTYPE_YUV111:
+            case IMGTYPE_RGB24:
+                bppx2 = 6;
+                break;
+            case IMGTYPE_YUV422:
+            case IMGTYPE_YUV211:
+                bppx2 = 4;
+                break;
+            case IMGTYPE_YUV411:
+            case IMGTYPE_YUV420:
+                bppx2 = 3;
+                break;
+            default:
+                bppx2 = 0; // will error out below
+                break;
+            }
+            if (len < ((width * height * bppx2) >> 1))
+                return AVERROR_INVALIDDATA;
             break;
+        }
         default:
             av_log(avctx, AV_LOG_ERROR, "BUG! Unknown MSZH compression in frame decoder.\n");
             return -1;
@@ -1570,6 +1570,10 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         /* EOF */
         if (start_code < 0) {
             goto the_end;
+        } else if (unescaped_buf_size > (1U<<29)) {
+            av_log(avctx, AV_LOG_ERROR, "MJPEG packet 0x%x too big (0x%x/0x%x), corrupt data?\n",
+                   start_code, unescaped_buf_ptr, buf_size);
+            return AVERROR_INVALIDDATA;
         } else {
             av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
                    start_code, buf_end - buf_ptr);
@@ -42,6 +42,7 @@
 
 #define BACKSTEP_SIZE 512
 #define EXTRABYTES 24
+#define LAST_BUF_SIZE 2 * BACKSTEP_SIZE + EXTRABYTES
 
 /* layer 3 "granule" */
 typedef struct GranuleDef {
@@ -65,7 +66,7 @@ typedef struct GranuleDef {
 
 typedef struct MPADecodeContext {
     MPA_DECODE_HEADER
-    uint8_t last_buf[2 * BACKSTEP_SIZE + EXTRABYTES];
+    uint8_t last_buf[LAST_BUF_SIZE];
     int last_buf_size;
     /* next header (used in free format parsing) */
     uint32_t free_format_next_header;
@@ -1379,7 +1380,8 @@ static int mp_decode_layer3(MPADecodeContext *s)
     if (!s->adu_mode) {
         const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
-        int extrasize = av_clip(get_bits_left(&s->gb) >> 3, 0, EXTRABYTES);
+        int extrasize = av_clip(get_bits_left(&s->gb) >> 3, 0,
+                                FFMAX(0, LAST_BUF_SIZE - s->last_buf_size));
         assert((get_bits_count(&s->gb) & 7) == 0);
         /* now we get bits from the main_data_begin offset */
         av_dlog(s->avctx, "seekback: %d\n", main_data_begin);
@@ -151,7 +151,7 @@ static void sbr_hf_gen_c(float (*X_high)[2], const float (*X_low)[2],
 }
 
 static void sbr_hf_g_filt_c(float (*Y)[2], const float (*X_high)[40][2],
-                            const float *g_filt, int m_max, int ixh)
+                            const float *g_filt, int m_max, intptr_t ixh)
 {
     int m;
@@ -238,4 +238,6 @@ av_cold void ff_sbrdsp_init(SBRDSPContext *s)
     if (ARCH_ARM)
         ff_sbrdsp_init_arm(s);
+    if (HAVE_MMX)
+        ff_sbrdsp_init_x86(s);
 }
@@ -21,6 +21,8 @@
 #ifndef LIBAVCODEC_SBRDSP_H
 #define LIBAVCODEC_SBRDSP_H
 
+#include <stdint.h>
+
 typedef struct SBRDSPContext {
     void (*sum64x5)(float *z);
     float (*sum_square)(float (*x)[2], int n);
@@ -34,7 +36,7 @@ typedef struct SBRDSPContext {
                   const float alpha0[2], const float alpha1[2],
                   float bw, int start, int end);
     void (*hf_g_filt)(float (*Y)[2], const float (*X_high)[40][2],
-                      const float *g_filt, int m_max, int ixh);
+                      const float *g_filt, int m_max, intptr_t ixh);
     void (*hf_apply_noise[4])(float (*Y)[2], const float *s_m,
                               const float *q_filt, int noise,
                               int kx, int m_max);
@@ -44,5 +46,6 @@ extern const float ff_sbr_noise_table[][2];
 
 void ff_sbrdsp_init(SBRDSPContext *s);
 void ff_sbrdsp_init_arm(SBRDSPContext *s);
+void ff_sbrdsp_init_x86(SBRDSPContext *s);
 
 #endif
@@ -58,24 +58,24 @@ typedef struct TiffContext {
     LZWState *lzw;
 } TiffContext;
 
-static int tget_short(const uint8_t **p, int le){
-    int v = le ? AV_RL16(*p) : AV_RB16(*p);
+static unsigned tget_short(const uint8_t **p, int le) {
+    unsigned v = le ? AV_RL16(*p) : AV_RB16(*p);
     *p += 2;
     return v;
 }
 
-static int tget_long(const uint8_t **p, int le){
-    int v = le ? AV_RL32(*p) : AV_RB32(*p);
+static unsigned tget_long(const uint8_t **p, int le) {
+    unsigned v = le ? AV_RL32(*p) : AV_RB32(*p);
     *p += 4;
     return v;
 }
 
-static int tget(const uint8_t **p, int type, int le){
+static unsigned tget(const uint8_t **p, int type, int le) {
     switch(type){
     case TIFF_BYTE : return *(*p)++;
     case TIFF_SHORT: return tget_short(p, le);
     case TIFF_LONG : return tget_long (p, le);
-    default        : return -1;
+    default        : return UINT_MAX;
     }
 }
@@ -340,7 +340,7 @@ static int init_image(TiffContext *s)
 
 static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
 {
-    int tag, type, count, off, value = 0;
+    unsigned tag, type, count, off, value = 0;
     int i, j;
     uint32_t *pal;
     const uint8_t *rp, *gp, *bp;
@@ -352,6 +352,11 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
     count = tget_long(&buf, s->le);
     off = tget_long(&buf, s->le);
 
+    if (type == 0 || type >= FF_ARRAY_ELEMS(type_sizes)) {
+        av_log(s->avctx, AV_LOG_DEBUG, "Unknown tiff type (%u) encountered\n", type);
+        return 0;
+    }
+
     if(count == 1){
         switch(type){
         case TIFF_BYTE:
@@ -370,13 +375,15 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
             break;
         }
         default:
-            value = -1;
+            value = UINT_MAX;
             buf = start + off;
         }
-    }else if(type_sizes[type] * count <= 4){
-        buf -= 4;
-    }else{
-        buf = start + off;
+    } else {
+        if (count <= 4 && type_sizes[type] * count <= 4) {
+            buf -= 4;
+        } else {
+            buf = start + off;
+        }
     }
 
     if(buf && (buf < start || buf > end_buf)){
@@ -454,7 +461,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
         }
         break;
     case TIFF_ROWSPERSTRIP:
-        if(type == TIFF_LONG && value == -1)
+        if (type == TIFF_LONG && value == UINT_MAX)
             value = s->avctx->height;
         if(value < 1){
             av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
@@ -53,6 +53,8 @@ YASM-OBJS-$(CONFIG_PNG_DECODER)        += x86/pngdsp.o
 MMX-OBJS-$(CONFIG_PNG_DECODER)         += x86/pngdsp-init.o
 YASM-OBJS-$(CONFIG_PRORES_DECODER)     += x86/proresdsp.o
 MMX-OBJS-$(CONFIG_PRORES_DECODER)      += x86/proresdsp-init.o
+MMX-OBJS-$(CONFIG_AAC_DECODER)         += x86/sbrdsp_init.o
+YASM-OBJS-$(CONFIG_AAC_DECODER)        += x86/sbrdsp.o
 MMX-OBJS-$(CONFIG_DWT)                 += x86/snowdsp_mmx.o \
                                           x86/dwt.o
 YASM-OBJS-$(CONFIG_V210_DECODER)       += x86/v210.o
@@ -0,0 +1,114 @@
+;******************************************************************************
+;* AAC Spectral Band Replication decoding functions
+;* Copyright (C) 2012 Christophe Gisquet <christophe.gisquet@gmail.com>
+;*
+;* This file is part of Libav.
+;*
+;* Libav is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* Libav is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with Libav; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+;SECTION_RODATA
+
+SECTION .text
+
+INIT_XMM sse
+cglobal sbr_sum_square, 2, 3, 6
+    mov         r2, r1
+    xorps       m0, m0
+    xorps       m1, m1
+    sar         r2, 3
+    jz          .prepare
+.loop:
+    movu        m2, [r0 +  0]
+    movu        m3, [r0 + 16]
+    movu        m4, [r0 + 32]
+    movu        m5, [r0 + 48]
+    mulps       m2, m2
+    mulps       m3, m3
+    mulps       m4, m4
+    mulps       m5, m5
+    addps       m0, m2
+    addps       m1, m3
+    addps       m0, m4
+    addps       m1, m5
+    add         r0, 64
+    dec         r2
+    jnz         .loop
+.prepare:
+    and         r1, 7
+    sar         r1, 1
+    jz          .end
+; len is a multiple of 2, thus there are at least 4 elements to process
+.endloop:
+    movu        m2, [r0]
+    add         r0, 16
+    mulps       m2, m2
+    dec         r1
+    addps       m0, m2
+    jnz         .endloop
+.end:
+    addps       m0, m1
+    movhlps     m2, m0
+    addps       m0, m2
+    movss       m1, m0
+    shufps      m0, m0, 1
+    addss       m0, m1
+%if ARCH_X86_64 == 0
+    movd        r0m, m0
+    fld         dword r0m
+%endif
+    RET
+
+%define STEP  40*4*2
+cglobal sbr_hf_g_filt, 5, 6, 5
+    lea         r1, [r1 + 8*r4]         ; offset by ixh elements into X_high
+    mov         r5, r3
+    and         r3, 0xFC
+    lea         r2, [r2 + r3*4]
+    lea         r0, [r0 + r3*8]
+    neg         r3
+.loop4:
+    movq        m0, [r2 + 4*r3 + 0]
+    movq        m1, [r2 + 4*r3 + 8]
+    movq        m2, [r1 + 0*STEP]
+    movq        m3, [r1 + 2*STEP]
+    movhps      m2, [r1 + 1*STEP]
+    movhps      m3, [r1 + 3*STEP]
+    punpckldq   m0, m0
+    punpckldq   m1, m1
+    mulps       m0, m2
+    mulps       m1, m3
+    movu        [r0 + 8*r3 +  0], m0
+    movu        [r0 + 8*r3 + 16], m1
+    add         r1, 4*STEP
+    add         r3, 4
+    jnz         .loop4
+    and         r5, 3                   ; number of single element loops
+    jz          .end
+.loop1: ; element 0 and 1 can be computed at the same time
+    movss       m0, [r2]
+    movq        m2, [r1]
+    punpckldq   m0, m0
+    mulps       m2, m0
+    movq        [r0], m2
+    add         r0, 8
+    add         r2, 4
+    add         r1, STEP
+    dec         r5
+    jnz         .loop1
+.end:
+    RET
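
In plain C, the two SSE kernels above compute roughly the following. These reference loops are a sketch written here for illustration (they mirror the scalar sbr_sum_square/sbr_hf_g_filt semantics), not code taken from the patch.

#include <stdint.h>

/* Energy of n complex samples stored as interleaved re/im float pairs
 * (what sbr_sum_square returns). */
static float sum_square_ref(const float (*x)[2], int n)
{
    float sum = 0.0f;
    int i;
    for (i = 0; i < n; i++)
        sum += x[i][0] * x[i][0] + x[i][1] * x[i][1];
    return sum;
}

/* Scale column ixh of X_high by the per-band gains g_filt into Y
 * (what sbr_hf_g_filt computes for m_max bands). */
static void hf_g_filt_ref(float (*Y)[2], const float (*X_high)[40][2],
                          const float *g_filt, int m_max, intptr_t ixh)
{
    int m;
    for (m = 0; m < m_max; m++) {
        Y[m][0] = X_high[m][ixh][0] * g_filt[m];
        Y[m][1] = X_high[m][ixh][1] * g_filt[m];
    }
}
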
@@ -0,0 +1,40 @@
+/*
+ * AAC Spectral Band Replication decoding functions
+ * Copyright (c) 2012 Christophe Gisquet <christophe.gisquet@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavcodec/sbrdsp.h"
+
+float ff_sbr_sum_square_sse(float (*x)[2], int n);
+void ff_sbr_hf_g_filt_sse(float (*Y)[2], const float (*X_high)[40][2],
+                          const float *g_filt, int m_max, intptr_t ixh);
+
+void ff_sbrdsp_init_x86(SBRDSPContext *s)
+{
+    if (HAVE_YASM) {
+        int mm_flags = av_get_cpu_flags();
+
+        if (mm_flags & AV_CPU_FLAG_SSE) {
+            s->sum_square = ff_sbr_sum_square_sse;
+            s->hf_g_filt  = ff_sbr_hf_g_filt_sse;
+        }
+    }
+}
@@ -75,6 +75,10 @@ static int ape_tag_read_field(AVFormatContext *s)
         if (!value)
             return AVERROR(ENOMEM);
         c = avio_read(pb, value, size);
+        if (c < 0) {
+            av_free(value);
+            return c;
+        }
         value[c] = 0;
         av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL);
     }
@@ -420,10 +420,10 @@ static int ogg_write_header(AVFormatContext *s)
             p = ogg_write_vorbiscomment(7, st->codec->flags & CODEC_FLAG_BITEXACT,
                                         &oggstream->header_len[1], &s->metadata,
                                         framing_bit);
-            oggstream->header[1] = p;
             if (!p)
                 return AVERROR(ENOMEM);
+            oggstream->header[1] = p;
 
             bytestream_put_byte(&p, header_type);
             bytestream_put_buffer(&p, cstr, 6);
@@ -529,10 +529,8 @@ static int ogg_write_trailer(AVFormatContext *s)
         if (st->codec->codec_id == CODEC_ID_FLAC ||
            st->codec->codec_id == CODEC_ID_SPEEX) {
             av_freep(&oggstream->header[0]);
-            av_freep(&oggstream->header[1]);
         }
-        else
-            av_freep(&oggstream->header[1]);
+        av_freep(&oggstream->header[1]);
         av_freep(&st->priv_data);
     }
     return 0;
@@ -1044,14 +1044,6 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
     /* update flags */
     if(is_intra_only(st->codec))
         pkt->flags |= AV_PKT_FLAG_KEY;
-    else if (pc) {
-        pkt->flags = 0;
-        /* keyframe computation */
-        if (pc->key_frame == 1)
-            pkt->flags |= AV_PKT_FLAG_KEY;
-        else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
-            pkt->flags |= AV_PKT_FLAG_KEY;
-    }
     if (pc)
         pkt->convergence_duration = pc->convergence_duration;
 }
@@ -1116,6 +1108,10 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
                 pkt->pts = st->parser->pts;
                 pkt->dts = st->parser->dts;
                 pkt->pos = st->parser->pos;
+                if (st->parser->key_frame == 1 ||
+                    (st->parser->key_frame == -1 &&
+                     st->parser->pict_type == AV_PICTURE_TYPE_I))
+                    pkt->flags |= AV_PKT_FLAG_KEY;
                 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
                     s->cur_st = NULL;
                     pkt->destruct= st->cur_pkt.destruct;