Originally committed as revision 14372 to svn://svn.ffmpeg.org/ffmpeg/trunk
@@ -508,7 +508,7 @@ retry:
             s->padding_bug_score= 256*256*256*64;
 
         /* very ugly XVID padding bug detection FIXME/XXX solve this differently
-         * lets hope this at least works
+         * Let us hope this at least works.
          */
         if(   s->resync_marker==0 && s->data_partitioning==0 && s->divx_version==0
            && s->codec_id==CODEC_ID_MPEG4 && s->vo_type==0)
@@ -24,7 +24,7 @@
  * Motion estimation template.
  */
 
-//lets hope gcc will remove the unused vars ...(gcc 3.2.2 seems to do it ...)
+//Let us hope gcc will remove the unused vars ...(gcc 3.2.2 seems to do it ...)
 #define LOAD_COMMON\
     uint32_t av_unused * const score_map= c->score_map;\
     const int av_unused xmin= c->xmin;\
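The comment touched by this hunk refers to a deliberate pattern: LOAD_COMMON declares every commonly needed local from the motion-estimation context and tags each one av_unused, so the compiler silences unused-variable warnings and discards whichever locals a given template instantiation never reads. A minimal, self-contained sketch of that pattern, assuming gcc's unused attribute and using hypothetical names (MECtx, LOAD_LIMITS, clip_x) that are not part of the patch:

    #include <stdio.h>

    #define av_unused __attribute__((unused))  /* gcc attribute, as in libavutil */

    typedef struct MECtx { int xmin, xmax, ymin, ymax; } MECtx;

    /* Convenience macro in the spirit of LOAD_COMMON: it declares every limit,
     * tagged av_unused, and relies on the compiler to drop the unread ones. */
    #define LOAD_LIMITS(c) \
        const int av_unused xmin = (c)->xmin; \
        const int av_unused xmax = (c)->xmax; \
        const int av_unused ymin = (c)->ymin; \
        const int av_unused ymax = (c)->ymax;

    static int clip_x(const MECtx *c, int x)
    {
        LOAD_LIMITS(c)            /* only xmin and xmax are actually read below */
        if (x < xmin) return xmin;
        if (x > xmax) return xmax;
        return x;
    }

    int main(void)
    {
        MECtx c = { 0, 15, 0, 15 };
        printf("%d\n", clip_x(&c, 20));   /* prints 15 */
        return 0;
    }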
@@ -1396,7 +1396,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
     linesize   = s->current_picture.linesize[0] << field_based;
     uvlinesize = s->current_picture.linesize[1] << field_based;
 
-    if(s->quarter_sample){ //FIXME obviously not perfect but qpel wont work in lowres anyway
+    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
         motion_x/=2;
         motion_y/=2;
     }
@@ -230,7 +230,7 @@ void av_resample_compensate(AVResampleContext *c, int sample_delta, int compensa
  * @param consumed the number of samples of src which have been consumed are returned here
  * @param src_size the number of unconsumed samples available
  * @param dst_size the amount of space in samples available in dst
- * @param update_ctx if this is 0 then the context wont be modified, that way several channels can be resampled with the same context
+ * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
  * @return the number of samples written in dst or -1 if an error occurred
  */
 int av_resample(AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx){
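The update_ctx parameter documented in this hunk is what lets one resampler serve several channels: pass 0 for all but the last channel so each channel is filtered with identical state, and let the final call advance the context. A usage sketch under that reading; the helper resample_stereo and the init arguments (filter length 16, log2 phase count 10, linear 0, cutoff 0.8) are illustrative assumptions, not code from the tree:

    #include <libavcodec/avcodec.h>  /* av_resample_init()/av_resample()/av_resample_close() */

    /* Resample two planar channels with one shared context: update_ctx stays 0
     * for every channel except the last, so both channels see the same filter state. */
    static void resample_stereo(short *dst[2], short *src[2],
                                int src_size, int dst_size,
                                int out_rate, int in_rate)
    {
        AVResampleContext *rc = av_resample_init(out_rate, in_rate, 16, 10, 0, 0.8);
        int consumed, ch;

        for (ch = 0; ch < 2; ch++) {
            int update_ctx = (ch == 1);   /* only the last channel advances the context */
            int written = av_resample(rc, dst[ch], src[ch], &consumed,
                                      src_size, dst_size, update_ctx);
            if (written < 0)
                break;                    /* -1 signals an error */
        }
        av_resample_close(rc);
    }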
@@ -155,7 +155,7 @@ static int dc1394_v1_read_header(AVFormatContext *c, AVFormatParameters * ap)
     if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
        return -1;
 
-    /* Now lets prep the hardware */
+    /* Now let us prep the hardware. */
     dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
     if (!dc1394->handle) {
         av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
@@ -248,7 +248,7 @@ static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap)
     if (dc1394_read_common(c,ap,&fmt,&fps) != 0)
        return -1;
 
-    /* Now lets prep the hardware */
+    /* Now let us prep the hardware. */
     dc1394->d = dc1394_new();
     dc1394_camera_enumerate (dc1394->d, &list);
     if ( !list || list->num == 0) {
@@ -255,7 +255,7 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
             av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
         av_fifo_generic_write(&c->audio_data[i], data, data_size, NULL);
 
-        /* Lets see if we've got enough audio for one DV frame */
+        /* Let us see if we've got enough audio for one DV frame. */
         c->has_audio |= ((reqasize <= av_fifo_size(&c->audio_data[i])) << i);
 
         break;
@@ -263,7 +263,7 @@ int dv_assemble_frame(DVMuxContext *c, AVStream* st,
         break;
     }
 
-    /* Lets see if we have enough data to construct one DV frame */
+    /* Let us see if we have enough data to construct one DV frame. */
     if (c->has_video == 1 && c->has_audio + 1 == 1<<c->n_ast) {
         dv_inject_metadata(c, *frame);
         c->has_audio = 0;
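The condition in the hunk above, c->has_audio + 1 == 1<<c->n_ast, works because has_audio keeps one readiness bit per audio stream: adding 1 to a value whose low n_ast bits are all set carries straight into bit n_ast. A standalone sketch of the arithmetic with hypothetical values, not the muxer code itself:

    #include <assert.h>

    int main(void)
    {
        int n_ast = 2;             /* two audio streams */
        int has_audio = 0;         /* one readiness bit per stream */

        has_audio |= 1 << 0;       /* stream 0 has a full frame's worth of audio */
        assert(has_audio + 1 != 1 << n_ast);   /* 0b01 + 1 == 0b10, not 0b100 */

        has_audio |= 1 << 1;       /* stream 1 ready as well */
        assert(has_audio + 1 == 1 << n_ast);   /* 0b11 + 1 == 0b100: all streams ready */

        return 0;
    }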
@@ -74,7 +74,7 @@ static inline av_const SoftFloat av_normalize1_sf(SoftFloat a){
 /**
  *
  * @return will not be more denormalized then a+b, so if either input is
- *         normalized then the output wont be worse then the other input
+ *         normalized then the output will not be worse then the other input
  *         if both are normalized then the output will be normalized
  */
 static inline av_const SoftFloat av_mul_sf(SoftFloat a, SoftFloat b){