@@ -223,7 +223,7 @@ static int ac3_parse_header(AC3DecodeContext *s)
     int i;
 
     /* read the rest of the bsi. read twice for dual mono mode. */
-    i = !(s->channel_mode);
+    i = !s->channel_mode;
     do {
         skip_bits(gbc, 5); // skip dialog normalization
         if (get_bits1(gbc))
@@ -792,7 +792,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
     }
 
     /* dynamic range */
-    i = !(s->channel_mode);
+    i = !s->channel_mode;
     do {
         if (get_bits1(gbc)) {
             s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)] - 1.0) *
@@ -658,8 +658,8 @@ void ff_cavs_init_top_lines(AVSContext *h) {
     h->top_mv[1] = av_malloc((h->mb_width*2+1)*sizeof(cavs_vector));
     h->top_pred_Y = av_malloc( h->mb_width*2*sizeof(*h->top_pred_Y));
     h->top_border_y = av_malloc((h->mb_width+1)*16);
-    h->top_border_u = av_malloc((h->mb_width)*10);
-    h->top_border_v = av_malloc((h->mb_width)*10);
+    h->top_border_u = av_malloc( h->mb_width * 10);
+    h->top_border_v = av_malloc( h->mb_width * 10);
 
     /* alloc space for co-located MVs and types */
     h->col_mv = av_malloc( h->mb_width*h->mb_height*4*sizeof(cavs_vector));
@@ -490,7 +490,7 @@ static int decode_pic(AVSContext *h) {
            skip_bits(&s->gb,24);//time_code
        /* old sample clips were all progressive and no low_delay,
           bump stream revision if detected otherwise */
-       if((s->low_delay) || !(show_bits(&s->gb,9) & 1))
+       if (s->low_delay || !(show_bits(&s->gb,9) & 1))
            h->stream_revision = 1;
        /* similarly test top_field_first and repeat_first_field */
        else if(show_bits(&s->gb,11) & 3)
@@ -109,8 +109,8 @@ static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
     for(y=0; y<8; y++){
         int x;
         for(x=0; x<8; x++){
-            dest_cb[x + y*(s->uvlinesize)]= dcu/8;
-            dest_cr[x + y*(s->uvlinesize)]= dcv/8;
+            dest_cb[x + y * s->uvlinesize] = dcu / 8;
+            dest_cr[x + y * s->uvlinesize] = dcv / 8;
         }
     }
 }
@@ -1092,8 +1092,8 @@ void ff_er_frame_end(MpegEncContext *s){
             for(y=0; y<8; y++){
                 int x;
                 for(x=0; x<8; x++){
-                    dcu+=dest_cb[x + y*(s->uvlinesize)];
-                    dcv+=dest_cr[x + y*(s->uvlinesize)];
+                    dcu += dest_cb[x + y * s->uvlinesize];
+                    dcv += dest_cr[x + y * s->uvlinesize];
                 }
             }
             s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3;
@@ -136,7 +136,7 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
     preset = GetDiracVideoFormatPreset(avccontext);
 
     /* initialize the encoder context */
-    dirac_encoder_context_init(&(p_dirac_params->enc_ctx), preset);
+    dirac_encoder_context_init(&p_dirac_params->enc_ctx, preset);
 
     p_dirac_params->enc_ctx.src_params.chroma = GetDiracChromaFormat(avccontext->pix_fmt);
@@ -199,7 +199,7 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
      * irrespective of the type of source material */
     p_dirac_params->enc_ctx.enc_params.picture_coding_mode = 1;
 
-    p_dirac_params->p_encoder = dirac_encoder_init(&(p_dirac_params->enc_ctx),
+    p_dirac_params->p_encoder = dirac_encoder_init(&p_dirac_params->enc_ctx,
                                                    verbose);
 
     if (!p_dirac_params->p_encoder) {
@@ -221,7 +221,7 @@ static void DiracFreeFrame(void *data)
 {
     DiracSchroEncodedFrame *enc_frame = data;
 
-    av_freep(&(enc_frame->p_encbuf));
+    av_freep(&enc_frame->p_encbuf);
     av_free(enc_frame);
 }
@@ -258,7 +258,7 @@ static void SchroedingerFreeFrame(void *data)
 {
     DiracSchroEncodedFrame *enc_frame = data;
 
-    av_freep(&(enc_frame->p_encbuf));
+    av_freep(&enc_frame->p_encbuf);
     av_free(enc_frame);
 }
@@ -270,7 +270,7 @@ static av_cold int xvid_encode_init(AVCodecContext *avctx) {
         rc2pass2.version = XVID_VERSION;
         rc2pass2.bitrate = avctx->bit_rate;
 
-        fd = ff_tempfile("xvidff.", &(x->twopassfile));
+        fd = ff_tempfile("xvidff.", &x->twopassfile);
         if( fd == -1 ) {
             av_log(avctx, AV_LOG_ERROR,
                 "Xvid: Cannot write 2-pass pipe\n");
@@ -414,7 +414,7 @@ static int xvid_encode_frame(AVCodecContext *avctx,
     char *tmp;
     struct xvid_context *x = avctx->priv_data;
     AVFrame *picture = data;
-    AVFrame *p = &(x->encoded_picture);
+    AVFrame *p = &x->encoded_picture;
 
     xvid_enc_frame_t xvid_enc_frame;
     xvid_enc_stats_t xvid_enc_stats;
@@ -575,7 +575,7 @@ int xvid_strip_vol_header(AVCodecContext *avctx,
         }
         /* Less dangerous now, memmove properly copies the two
            chunks of overlapping data */
-        memmove(frame, &(frame[vo_len]), frame_len - vo_len);
+        memmove(frame, &frame[vo_len], frame_len - vo_len);
         return frame_len - vo_len;
     } else
         return frame_len;
@@ -1235,7 +1235,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
         /* low_delay may be forced, in this case we will have B-frames
          * that behave like P-frames. */
-        avctx->has_b_frames = !(s->low_delay);
+        avctx->has_b_frames = !s->low_delay;
 
         assert((avctx->sub_id == 1) == (avctx->codec_id == CODEC_ID_MPEG1VIDEO));
 
         if (avctx->codec_id == CODEC_ID_MPEG1VIDEO) {
@@ -705,8 +705,8 @@ av_cold int MPV_common_init(MpegEncContext *s)
     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
 
     /* set chroma shifts */
-    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
-                                  &(s->chroma_y_shift) );
+    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
+                                  &s->chroma_y_shift);
 
     /* set default edge pos, will be overriden
      * in decode_header if needed */
@@ -2339,7 +2339,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
             }
 
             dct_linesize = linesize << s->interlaced_dct;
-            dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
+            dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
 
             if(readable){
                 dest_y= s->dest[0];
@@ -2435,7 +2435,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                 }else{
                     //chroma422
                     dct_linesize = uvlinesize << s->interlaced_dct;
-                    dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
 
                     add_dct(s, block[4], 4, dest_cb, dct_linesize);
                     add_dct(s, block[5], 5, dest_cr, dct_linesize);
@@ -2487,7 +2487,7 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                 }else{
 
                     dct_linesize = uvlinesize << s->interlaced_dct;
-                    dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
 
                     s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
                     s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
@@ -268,10 +268,10 @@ static int dct_quantize_altivec(MpegEncContext* s,
         vec_ste(baseVector, 0, &oldBaseValue);
 
         qmat = (vector signed int*)s->q_intra_matrix[qscale];
-        biasAddr = &(s->intra_quant_bias);
+        biasAddr = &s->intra_quant_bias;
     } else {
         qmat = (vector signed int*)s->q_inter_matrix[qscale];
-        biasAddr = &(s->inter_quant_bias);
+        biasAddr = &s->inter_quant_bias;
     }
 
     // Load the bias vector (We add 0.5 to the bias so that we're
@@ -361,8 +361,8 @@ static int dct_quantize_altivec(MpegEncContext* s,
         vector signed int max_q_int, min_q_int;
         vector signed short max_q, min_q;
 
-        LOAD4(max_q_int, &(s->max_qcoeff));
-        LOAD4(min_q_int, &(s->min_qcoeff));
+        LOAD4(max_q_int, &s->max_qcoeff);
+        LOAD4(min_q_int, &s->min_qcoeff);
 
         max_q = vec_pack(max_q_int, max_q_int);
         min_q = vec_pack(min_q_int, min_q_int);
@@ -829,7 +829,7 @@ static int frame_thread_init(AVCodecContext *avctx)
             err = AVERROR(ENOMEM);
             goto error;
         }
-        *(copy->internal) = *(src->internal);
+        *copy->internal = *src->internal;
         copy->internal->is_copy = 1;
 
         if (codec->init_thread_copy)
@@ -88,7 +88,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
         return -1;
     }
 
-    zret = inflateReset(&(c->zstream));
+    zret = inflateReset(&c->zstream);
     if (zret != Z_OK) {
         av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
         return -1;
@@ -97,7 +97,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
     c->zstream.avail_in = len;
     c->zstream.next_out = c->decomp_buf;
     c->zstream.avail_out = c->decomp_size;
-    zret = inflate(&(c->zstream), Z_FINISH);
+    zret = inflate(&c->zstream, Z_FINISH);
     // Z_DATA_ERROR means empty picture
     if ((zret != Z_OK) && (zret != Z_STREAM_END) && (zret != Z_DATA_ERROR)) {
         av_log(avctx, AV_LOG_ERROR, "Inflate error: %d\n", zret);
@@ -143,7 +143,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     c->height = avctx->height;
 
     // Needed if zlib unused or init aborted before inflateInit
-    memset(&(c->zstream), 0, sizeof(z_stream));
+    memset(&c->zstream, 0, sizeof(z_stream));
     switch(avctx->bits_per_coded_sample){
     case 8: avctx->pix_fmt = PIX_FMT_PAL8; break;
     case 16: avctx->pix_fmt = PIX_FMT_RGB555; break;
@@ -169,7 +169,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     c->zstream.zalloc = Z_NULL;
     c->zstream.zfree = Z_NULL;
     c->zstream.opaque = Z_NULL;
-    zret = inflateInit(&(c->zstream));
+    zret = inflateInit(&c->zstream);
     if (zret != Z_OK) {
         av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
         return 1;
@@ -193,7 +193,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
 
     if (c->pic.data[0])
         avctx->release_buffer(avctx, &c->pic);
-    inflateEnd(&(c->zstream));
+    inflateEnd(&c->zstream);
 
     return 0;
 }
@@ -5342,7 +5342,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
     if (v->profile == PROFILE_ADVANCED)
         avctx->level = v->level;
 
-    avctx->has_b_frames = !!(avctx->max_b_frames);
+    avctx->has_b_frames = !!avctx->max_b_frames;
 
     s->mb_width = (avctx->coded_width + 15) >> 4;
     s->mb_height = (avctx->coded_height + 15) >> 4;
@@ -816,8 +816,7 @@ static void create_map(vorbis_context *vc, unsigned floor_number)
         for (idx = 0; idx < n; ++idx) {
             map[idx] = floor(BARK((vf->rate * idx) / (2.0f * n)) *
-                             ((vf->bark_map_size) /
-                              BARK(vf->rate / 2.0f)));
+                             (vf->bark_map_size / BARK(vf->rate / 2.0f)));
             if (vf->bark_map_size-1 < map[idx])
                 map[idx] = vf->bark_map_size - 1;
         }
@@ -975,7 +974,7 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
     int headers_len = avccontext->extradata_size;
     uint8_t *header_start[3];
     int header_len[3];
-    GetBitContext *gb = &(vc->gb);
+    GetBitContext *gb = &vc->gb;
     int hdr_type, ret;
 
     vc->avccontext = avccontext;
@@ -1615,7 +1614,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     vorbis_context *vc = avccontext->priv_data;
-    GetBitContext *gb = &(vc->gb);
+    GetBitContext *gb = &vc->gb;
     const float *channel_ptrs[255];
     int i, len, ret;
@@ -613,7 +613,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     c->bpp = avctx->bits_per_coded_sample;
 
     // Needed if zlib unused or init aborted before inflateInit
-    memset(&(c->zstream), 0, sizeof(z_stream));
+    memset(&c->zstream, 0, sizeof(z_stream));
     avctx->pix_fmt = PIX_FMT_RGB24;
     c->decomp_size = (avctx->width + 255) * 4 * (avctx->height + 64);
@@ -630,7 +630,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     c->zstream.zalloc = Z_NULL;
     c->zstream.zfree = Z_NULL;
     c->zstream.opaque = Z_NULL;
-    zret = inflateInit(&(c->zstream));
+    zret = inflateInit(&c->zstream);
     if (zret != Z_OK) {
         av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
         return 1;
@@ -654,7 +654,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
     if (c->pic.data[0])
         avctx->release_buffer(avctx, &c->pic);
-    inflateEnd(&(c->zstream));
+    inflateEnd(&c->zstream);
     av_freep(&c->cur);
     av_freep(&c->prev);
@@ -269,7 +269,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     }
 
     // Needed if zlib unused or init aborted before deflateInit
-    memset(&(c->zstream), 0, sizeof(z_stream));
+    memset(&c->zstream, 0, sizeof(z_stream));
     c->comp_size = avctx->width * avctx->height + 1024 +
         ((avctx->width + ZMBV_BLOCK - 1) / ZMBV_BLOCK) * ((avctx->height + ZMBV_BLOCK - 1) / ZMBV_BLOCK) * 2 + 4;
     if ((c->work_buf = av_malloc(c->comp_size)) == NULL) {
@@ -294,7 +294,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     c->zstream.zalloc = Z_NULL;
     c->zstream.zfree = Z_NULL;
     c->zstream.opaque = Z_NULL;
-    zret = deflateInit(&(c->zstream), lvl);
+    zret = deflateInit(&c->zstream, lvl);
     if (zret != Z_OK) {
         av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
         return -1;
@@ -317,7 +317,7 @@ static av_cold int encode_end(AVCodecContext *avctx)
     av_freep(&c->comp_buf);
     av_freep(&c->work_buf);
 
-    deflateEnd(&(c->zstream));
+    deflateEnd(&c->zstream);
     av_freep(&c->prev);
 
     return 0;
@@ -292,7 +292,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
 
     if (bktr_init(s1->filename, width, height, s->standard,
-                  &(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0) {
+                  &s->video_fd, &s->tuner_fd, -1, 0.0) < 0) {
         ret = AVERROR(EIO);
         goto out;
     }
@@ -69,7 +69,7 @@ double ff_timefilter_update(TimeFilter *self, double system_time, double period)
         loop_error = system_time - self->cycle_time;
 
         /// update loop
-        self->cycle_time += FFMAX(self->feedback2_factor, 1.0/(self->count)) * loop_error;
+        self->cycle_time += FFMAX(self->feedback2_factor, 1.0 / self->count) * loop_error;
         self->clock_period += self->feedback3_factor * loop_error / period;
     }
     return self->cycle_time;
@@ -147,7 +147,7 @@ static int request_frame(AVFilterLink *link)
 static int poll_frame(AVFilterLink *link)
 {
     BufferSourceContext *c = link->src->priv;
-    return !!(c->buf);
+    return !!c->buf;
 }
 
 AVFilter avfilter_vsrc_buffer = {
@@ -168,7 +168,7 @@ static void flush_buffer(AVIOContext *s)
 
 void avio_w8(AVIOContext *s, int b)
 {
-    *(s->buf_ptr)++ = b;
+    *s->buf_ptr++ = b;
     if (s->buf_ptr >= s->buf_end)
         flush_buffer(s);
 }
@@ -53,7 +53,7 @@ typedef struct MTVDemuxContext {
 static int mtv_probe(AVProbeData *p)
 {
     /* Magic is 'AMV' */
-    if(*(p->buf) != 'A' || *(p->buf+1) != 'M' || *(p->buf+2) != 'V')
+    if (*p->buf != 'A' || *(p->buf + 1) != 'M' || *(p->buf + 2) != 'V')
         return 0;
 
     /* Check for nonzero in bpp and (width|height) header fields */
@@ -661,7 +661,7 @@ static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb,
         vst->videobufpos += len;
         rm->remaining_len-= len;
 
-        if(type == 2 || (vst->videobufpos) == vst->videobufsize){
+        if (type == 2 || vst->videobufpos == vst->videobufsize) {
             vst->pkt.data[0] = vst->cur_slice-1;
             *pkt= vst->pkt;
             vst->pkt.data= NULL;