| @@ -216,7 +216,7 @@ static const float psy_fir_coeffs[] = { | |||||
| }; | }; | ||||
| /** | /** | ||||
| * calculates the attack threshold for ABR from the above table for the LAME psy model | |||||
| * Calculate the ABR attack threshold from the above LAME psymodel table. | |||||
| */ | */ | ||||
| static float lame_calc_attack_threshold(int bitrate) | static float lame_calc_attack_threshold(int bitrate) | ||||
| { | { | ||||
| @@ -111,7 +111,7 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx) | |||||
| /** | /** | ||||
| * Decode the frame header in the "MIME/storage" format. This format | * Decode the frame header in the "MIME/storage" format. This format | ||||
| * is simpler and does not carry the auxiliary information of the frame | |||||
| * is simpler and does not carry the auxiliary frame information. | |||||
| * | * | ||||
| * @param[in] ctx The Context | * @param[in] ctx The Context | ||||
| * @param[in] buf Pointer to the input buffer | * @param[in] buf Pointer to the input buffer | ||||
| @@ -133,7 +133,7 @@ static int decode_mime_header(AMRWBContext *ctx, const uint8_t *buf) | |||||
| } | } | ||||
| /** | /** | ||||
| * Decodes quantized ISF vectors using 36-bit indexes (6K60 mode only) | |||||
| * Decode quantized ISF vectors using 36-bit indexes (6K60 mode only). | |||||
| * | * | ||||
| * @param[in] ind Array of 5 indexes | * @param[in] ind Array of 5 indexes | ||||
| * @param[out] isf_q Buffer for isf_q[LP_ORDER] | * @param[out] isf_q Buffer for isf_q[LP_ORDER] | ||||
| @@ -160,7 +160,7 @@ static void decode_isf_indices_36b(uint16_t *ind, float *isf_q) | |||||
| } | } | ||||
| /** | /** | ||||
| * Decodes quantized ISF vectors using 46-bit indexes (except 6K60 mode) | |||||
| * Decode quantized ISF vectors using 46-bit indexes (except 6K60 mode). | |||||
| * | * | ||||
| * @param[in] ind Array of 7 indexes | * @param[in] ind Array of 7 indexes | ||||
| * @param[out] isf_q Buffer for isf_q[LP_ORDER] | * @param[out] isf_q Buffer for isf_q[LP_ORDER] | ||||
| @@ -193,8 +193,8 @@ static void decode_isf_indices_46b(uint16_t *ind, float *isf_q) | |||||
| } | } | ||||
| /** | /** | ||||
| * Apply mean and past ISF values using the prediction factor | |||||
| * Updates past ISF vector | |||||
| * Apply mean and past ISF values using the prediction factor. | |||||
| * Updates past ISF vector. | |||||
| * | * | ||||
| * @param[in,out] isf_q Current quantized ISF | * @param[in,out] isf_q Current quantized ISF | ||||
| * @param[in,out] isf_past Past quantized ISF | * @param[in,out] isf_past Past quantized ISF | ||||
| @@ -215,7 +215,7 @@ static void isf_add_mean_and_past(float *isf_q, float *isf_past) | |||||
| /** | /** | ||||
| * Interpolate the fourth ISP vector from current and past frames | * Interpolate the fourth ISP vector from current and past frames | ||||
| * to obtain a ISP vector for each subframe | |||||
| * to obtain an ISP vector for each subframe. | |||||
| * | * | ||||
| * @param[in,out] isp_q ISPs for each subframe | * @param[in,out] isp_q ISPs for each subframe | ||||
| * @param[in] isp4_past Past ISP for subframe 4 | * @param[in] isp4_past Past ISP for subframe 4 | ||||
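A minimal sketch of what "interpolate the fourth ISP vector" means in practice, for readers skimming the hunks; the weights, the LP order of 16, and the helper name are illustrative assumptions, not the decoder's actual values.

```c
/* Illustrative only: the spec-defined weights differ from these. Each of the
 * first three subframes gets an ISP vector mixed from the past frame's 4th
 * ISP and the current frame's 4th ISP; subframe 4 keeps its decoded vector. */
static void interpolate_isp_sketch(double isp_q[4][16], const double *isp4_past)
{
    static const double w[3] = { 0.25, 0.50, 0.75 }; /* placeholder weights */
    for (int sf = 0; sf < 3; sf++)
        for (int i = 0; i < 16; i++)
            isp_q[sf][i] = (1.0 - w[sf]) * isp4_past[i] + w[sf] * isp_q[3][i];
}
```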
| @@ -232,9 +232,9 @@ static void interpolate_isp(double isp_q[4][LP_ORDER], const double *isp4_past) | |||||
| } | } | ||||
| /** | /** | ||||
| * Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes) | |||||
| * Calculate integer lag and fractional lag always using 1/4 resolution | |||||
| * In 1st and 3rd subframes the index is relative to last subframe integer lag | |||||
| * Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes). | |||||
| * Calculate integer lag and fractional lag always using 1/4 resolution. | |||||
| * In 1st and 3rd subframes the index is relative to last subframe integer lag. | |||||
| * | * | ||||
| * @param[out] lag_int Decoded integer pitch lag | * @param[out] lag_int Decoded integer pitch lag | ||||
| * @param[out] lag_frac Decoded fractional pitch lag | * @param[out] lag_frac Decoded fractional pitch lag | ||||
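A hedged sketch of just the 1/4-resolution split mentioned above; the minimum lag, the mode-dependent index ranges, and the relative-index handling for the 1st and 3rd subframes are deliberately left out, and the helper name is invented.

```c
/* Placeholder split: real decoders apply mode-dependent ranges and offsets. */
static void split_pitch_index_sketch(int pitch_index, int min_lag,
                                     int *lag_int, int *lag_frac)
{
    *lag_int  = pitch_index / 4 + min_lag; /* whole-sample lag            */
    *lag_frac = pitch_index % 4;           /* quarter-sample offset, 0..3 */
}
```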
| @@ -271,9 +271,9 @@ static void decode_pitch_lag_high(int *lag_int, int *lag_frac, int pitch_index, | |||||
| } | } | ||||
| /** | /** | ||||
| * Decode a adaptive codebook index into pitch lag for 8k85 and 6k60 modes | |||||
| * Description is analogous to decode_pitch_lag_high, but in 6k60 relative | |||||
| * index is used for all subframes except the first | |||||
| * Decode an adaptive codebook index into pitch lag for 8k85 and 6k60 modes. | |||||
| * The description is analogous to decode_pitch_lag_high, but in 6k60 the | |||||
| * relative index is used for all subframes except the first. | |||||
| */ | */ | ||||
| static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index, | static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index, | ||||
| uint8_t *base_lag_int, int subframe, enum Mode mode) | uint8_t *base_lag_int, int subframe, enum Mode mode) | ||||
| @@ -298,7 +298,7 @@ static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index, | |||||
| /** | /** | ||||
| * Find the pitch vector by interpolating the past excitation at the | * Find the pitch vector by interpolating the past excitation at the | ||||
| * pitch delay, which is obtained in this function | |||||
| * pitch delay, which is obtained in this function. | |||||
| * | * | ||||
| * @param[in,out] ctx The context | * @param[in,out] ctx The context | ||||
| * @param[in] amr_subframe Current subframe data | * @param[in] amr_subframe Current subframe data | ||||
| @@ -351,10 +351,10 @@ static void decode_pitch_vector(AMRWBContext *ctx, | |||||
| /** | /** | ||||
| * The next six functions decode_[i]p_track decode exactly i pulses | * The next six functions decode_[i]p_track decode exactly i pulses | ||||
| * positions and amplitudes (-1 or 1) in a subframe track using | * positions and amplitudes (-1 or 1) in a subframe track using | ||||
| * an encoded pulse indexing (TS 26.190 section 5.8.2) | |||||
| * an encoded pulse indexing (TS 26.190 section 5.8.2). | |||||
| * | * | ||||
| * The results are given in out[], in which a negative number means | * The results are given in out[], in which a negative number means | ||||
| * amplitude -1 and vice versa (i.e., ampl(x) = x / abs(x) ) | |||||
| * amplitude -1 and vice versa (i.e., ampl(x) = x / abs(x) ). | |||||
| * | * | ||||
| * @param[out] out Output buffer (writes i elements) | * @param[out] out Output buffer (writes i elements) | ||||
| * @param[in] code Pulse index (no. of bits varies, see below) | * @param[in] code Pulse index (no. of bits varies, see below) | ||||
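Since the sign convention (ampl(x) = x / abs(x)) is the part most likely to trip readers up, a tiny hedged illustration of how a caller could split one out[] entry; the helper name is invented and the position-0 corner case the real code handles is glossed over.

```c
/* Illustration of the documented convention only; not the decoder's code. */
static void split_pulse_sketch(int x, int *position, int *amplitude)
{
    *amplitude = x < 0 ? -1 : 1;  /* sign encodes the amplitude           */
    *position  = x < 0 ? -x : x;  /* magnitude encodes the track position */
}
```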
| @@ -470,7 +470,7 @@ static void decode_6p_track(int *out, int code, int m, int off) ///code: 6m-2 bi | |||||
| /** | /** | ||||
| * Decode the algebraic codebook index to pulse positions and signs, | * Decode the algebraic codebook index to pulse positions and signs, | ||||
| * then construct the algebraic codebook vector | |||||
| * then construct the algebraic codebook vector. | |||||
| * | * | ||||
| * @param[out] fixed_vector Buffer for the fixed codebook excitation | * @param[out] fixed_vector Buffer for the fixed codebook excitation | ||||
| * @param[in] pulse_hi MSBs part of the pulse index array (higher modes only) | * @param[in] pulse_hi MSBs part of the pulse index array (higher modes only) | ||||
| @@ -541,7 +541,7 @@ static void decode_fixed_vector(float *fixed_vector, const uint16_t *pulse_hi, | |||||
| } | } | ||||
| /** | /** | ||||
| * Decode pitch gain and fixed gain correction factor | |||||
| * Decode pitch gain and fixed gain correction factor. | |||||
| * | * | ||||
| * @param[in] vq_gain Vector-quantized index for gains | * @param[in] vq_gain Vector-quantized index for gains | ||||
| * @param[in] mode Mode of the current frame | * @param[in] mode Mode of the current frame | ||||
| @@ -559,7 +559,7 @@ static void decode_gains(const uint8_t vq_gain, const enum Mode mode, | |||||
| } | } | ||||
| /** | /** | ||||
| * Apply pitch sharpening filters to the fixed codebook vector | |||||
| * Apply pitch sharpening filters to the fixed codebook vector. | |||||
| * | * | ||||
| * @param[in] ctx The context | * @param[in] ctx The context | ||||
| * @param[in,out] fixed_vector Fixed codebook excitation | * @param[in,out] fixed_vector Fixed codebook excitation | ||||
| @@ -580,7 +580,7 @@ static void pitch_sharpening(AMRWBContext *ctx, float *fixed_vector) | |||||
| } | } | ||||
| /** | /** | ||||
| * Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced) | |||||
| * Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced). | |||||
| * | * | ||||
| * @param[in] p_vector, f_vector Pitch and fixed excitation vectors | * @param[in] p_vector, f_vector Pitch and fixed excitation vectors | ||||
| * @param[in] p_gain, f_gain Pitch and fixed gains | * @param[in] p_gain, f_gain Pitch and fixed gains | ||||
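A sketch of the usual voicing-factor definition the comment alludes to: compare pitch-excitation energy against fixed-codebook energy and map the ratio into [-1, 1]. The length parameter and the helper name are assumptions.

```c
static float voice_factor_sketch(const float *p_vector, float p_gain,
                                 const float *f_vector, float f_gain,
                                 int length)
{
    float p_ener = 0.0f, f_ener = 0.0f;

    for (int i = 0; i < length; i++) {
        p_ener += p_vector[i] * p_vector[i]; /* pitch contribution energy */
        f_ener += f_vector[i] * f_vector[i]; /* fixed contribution energy */
    }
    p_ener *= p_gain * p_gain;
    f_ener *= f_gain * f_gain;

    /* -1.0 when the fixed codebook dominates (unvoiced),
     *  1.0 when the pitch/adaptive codebook dominates (voiced) */
    return (p_ener - f_ener) / (p_ener + f_ener);
}
```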
| @@ -599,8 +599,8 @@ static float voice_factor(float *p_vector, float p_gain, | |||||
| } | } | ||||
| /** | /** | ||||
| * Reduce fixed vector sparseness by smoothing with one of three IR filters | |||||
| * Also known as "adaptive phase dispersion" | |||||
| * Reduce fixed vector sparseness by smoothing with one of three IR filters, | |||||
| * also known as "adaptive phase dispersion". | |||||
| * | * | ||||
| * @param[in] ctx The context | * @param[in] ctx The context | ||||
| * @param[in,out] fixed_vector Unfiltered fixed vector | * @param[in,out] fixed_vector Unfiltered fixed vector | ||||
| @@ -670,7 +670,7 @@ static float *anti_sparseness(AMRWBContext *ctx, | |||||
| /** | /** | ||||
| * Calculate a stability factor {teta} based on distance between | * Calculate a stability factor {teta} based on distance between | ||||
| * current and past isf. A value of 1 shows maximum signal stability | |||||
| * current and past isf. A value of 1 shows maximum signal stability. | |||||
| */ | */ | ||||
| static float stability_factor(const float *isf, const float *isf_past) | static float stability_factor(const float *isf, const float *isf_past) | ||||
| { | { | ||||
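The one-line description leaves the mapping implicit, so here is a hedged sketch: the squared ISF distance is scaled and subtracted so that small distances give values near 1. The normalization constant and the clamping are illustrative, not the decoder's.

```c
static float stability_factor_sketch(const float *isf, const float *isf_past,
                                     int order)
{
    const float NORM = 1e-3f; /* made-up scale; the real factor is spec-derived */
    float dist = 0.0f, teta;

    for (int i = 0; i < order - 1; i++) {
        float d = isf[i] - isf_past[i];
        dist += d * d;                 /* squared distance between ISF sets */
    }
    teta = 1.25f - dist * NORM;
    return teta < 0.0f ? 0.0f : teta > 1.0f ? 1.0f : teta;
}
```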
| @@ -687,7 +687,7 @@ static float stability_factor(const float *isf, const float *isf_past) | |||||
| /** | /** | ||||
| * Apply a non-linear fixed gain smoothing in order to reduce | * Apply a non-linear fixed gain smoothing in order to reduce | ||||
| * fluctuation in the energy of excitation | |||||
| * fluctuation in the energy of excitation. | |||||
| * | * | ||||
| * @param[in] fixed_gain Unsmoothed fixed gain | * @param[in] fixed_gain Unsmoothed fixed gain | ||||
| * @param[in,out] prev_tr_gain Previous threshold gain (updated) | * @param[in,out] prev_tr_gain Previous threshold gain (updated) | ||||
| @@ -718,7 +718,7 @@ static float noise_enhancer(float fixed_gain, float *prev_tr_gain, | |||||
| } | } | ||||
| /** | /** | ||||
| * Filter the fixed_vector to emphasize the higher frequencies | |||||
| * Filter the fixed_vector to emphasize the higher frequencies. | |||||
| * | * | ||||
| * @param[in,out] fixed_vector Fixed codebook vector | * @param[in,out] fixed_vector Fixed codebook vector | ||||
| * @param[in] voice_fac Frame voicing factor | * @param[in] voice_fac Frame voicing factor | ||||
| @@ -742,7 +742,7 @@ static void pitch_enhancer(float *fixed_vector, float voice_fac) | |||||
| } | } | ||||
| /** | /** | ||||
| * Conduct 16th order linear predictive coding synthesis from excitation | |||||
| * Conduct 16th order linear predictive coding synthesis from excitation. | |||||
| * | * | ||||
| * @param[in] ctx Pointer to the AMRWBContext | * @param[in] ctx Pointer to the AMRWBContext | ||||
| * @param[in] lpc Pointer to the LPC coefficients | * @param[in] lpc Pointer to the LPC coefficients | ||||
| @@ -802,7 +802,7 @@ static void de_emphasis(float *out, float *in, float m, float mem[1]) | |||||
| /** | /** | ||||
| * Upsample a signal by 5/4 ratio (from 12.8kHz to 16kHz) using | * Upsample a signal by 5/4 ratio (from 12.8kHz to 16kHz) using | ||||
| * a FIR interpolation filter. Uses past data from before *in address | |||||
| * a FIR interpolation filter. Uses past data from before *in address. | |||||
| * | * | ||||
| * @param[out] out Buffer for interpolated signal | * @param[out] out Buffer for interpolated signal | ||||
| * @param[in] in Current signal data (length 0.8*o_size) | * @param[in] in Current signal data (length 0.8*o_size) | ||||
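A hedged polyphase view of the 5/4 upsampling the comment describes: every 4 input samples produce 5 output samples, each taken by one of five phase filters. The tap count and coefficients are placeholders, and the negative indexing relies on the documented "past data from before *in".

```c
#define UP_TAPS 8 /* placeholder filter length per phase */

static void upsample_5_4_sketch(float *out, const float *in, int o_size,
                                const float phase_filter[5][UP_TAPS])
{
    for (int j = 0; j < o_size; j++) {
        int   i     = (j * 4) / 5;  /* matching input position */
        int   phase = j % 5;        /* fractional offset class */
        float acc   = 0.0f;

        for (int k = 0; k < UP_TAPS; k++)
            acc += phase_filter[phase][k] * in[i - k]; /* may reach before *in */
        out[j] = acc;
    }
}
```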
| @@ -832,7 +832,7 @@ static void upsample_5_4(float *out, const float *in, int o_size) | |||||
| /** | /** | ||||
| * Calculate the high-band gain based on encoded index (23k85 mode) or | * Calculate the high-band gain based on encoded index (23k85 mode) or | ||||
| * on the low-band speech signal and the Voice Activity Detection flag | |||||
| * on the low-band speech signal and the Voice Activity Detection flag. | |||||
| * | * | ||||
| * @param[in] ctx The context | * @param[in] ctx The context | ||||
| * @param[in] synth LB speech synthesis at 12.8k | * @param[in] synth LB speech synthesis at 12.8k | ||||
| @@ -857,7 +857,7 @@ static float find_hb_gain(AMRWBContext *ctx, const float *synth, | |||||
| /** | /** | ||||
| * Generate the high-band excitation with the same energy from the lower | * Generate the high-band excitation with the same energy from the lower | ||||
| * one and scaled by the given gain | |||||
| * one and scaled by the given gain. | |||||
| * | * | ||||
| * @param[in] ctx The context | * @param[in] ctx The context | ||||
| * @param[out] hb_exc Buffer for the excitation | * @param[out] hb_exc Buffer for the excitation | ||||
| @@ -880,7 +880,7 @@ static void scaled_hb_excitation(AMRWBContext *ctx, float *hb_exc, | |||||
| } | } | ||||
| /** | /** | ||||
| * Calculate the auto-correlation for the ISF difference vector | |||||
| * Calculate the auto-correlation for the ISF difference vector. | |||||
| */ | */ | ||||
| static float auto_correlation(float *diff_isf, float mean, int lag) | static float auto_correlation(float *diff_isf, float mean, int lag) | ||||
| { | { | ||||
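For completeness, a minimal sketch of a lag-based auto-correlation on a mean-removed vector, matching the one-line description; the explicit length parameter is an assumption (the real function knows the ISF difference length).

```c
static float auto_correlation_sketch(const float *diff_isf, float mean,
                                     int lag, int length)
{
    float sum = 0.0f;

    for (int i = lag; i < length; i++)
        sum += (diff_isf[i] - mean) * (diff_isf[i - lag] - mean);
    return sum;
}
```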
| @@ -896,7 +896,7 @@ static float auto_correlation(float *diff_isf, float mean, int lag) | |||||
| /** | /** | ||||
| * Extrapolate a ISF vector to the 16kHz range (20th order LP) | * Extrapolate a ISF vector to the 16kHz range (20th order LP) | ||||
| * used at mode 6k60 LP filter for the high frequency band | |||||
| * used in 6k60 mode for the high frequency band LP filter. | ||||
| * | * | ||||
| * @param[out] out Buffer for extrapolated isf | * @param[out] out Buffer for extrapolated isf | ||||
| * @param[in] isf Input isf vector | * @param[in] isf Input isf vector | ||||
| @@ -981,7 +981,7 @@ static void lpc_weighting(float *out, const float *lpc, float gamma, int size) | |||||
| /** | /** | ||||
| * Conduct 20th order linear predictive coding synthesis for the high | * Conduct 20th order linear predictive coding synthesis for the high | ||||
| * frequency band excitation at 16kHz | |||||
| * frequency band excitation at 16kHz. | |||||
| * | * | ||||
| * @param[in] ctx The context | * @param[in] ctx The context | ||||
| * @param[in] subframe Current subframe index (0 to 3) | * @param[in] subframe Current subframe index (0 to 3) | ||||
| @@ -1019,8 +1019,8 @@ static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples, | |||||
| } | } | ||||
| /** | /** | ||||
| * Apply to high-band samples a 15th order filter | |||||
| * The filter characteristic depends on the given coefficients | |||||
| * Apply a 15th order filter to high-band samples. | |||||
| * The filter characteristic depends on the given coefficients. | |||||
| * | * | ||||
| * @param[out] out Buffer for filtered output | * @param[out] out Buffer for filtered output | ||||
| * @param[in] fir_coef Filter coefficients | * @param[in] fir_coef Filter coefficients | ||||
| @@ -1048,7 +1048,7 @@ static void hb_fir_filter(float *out, const float fir_coef[HB_FIR_SIZE + 1], | |||||
| } | } | ||||
| /** | /** | ||||
| * Update context state before the next subframe | |||||
| * Update context state before the next subframe. | |||||
| */ | */ | ||||
| static void update_sub_state(AMRWBContext *ctx) | static void update_sub_state(AMRWBContext *ctx) | ||||
| { | { | ||||
| @@ -2591,7 +2591,7 @@ typedef struct AVCodecContext { | |||||
| #if FF_API_X264_GLOBAL_OPTS | #if FF_API_X264_GLOBAL_OPTS | ||||
| /** | /** | ||||
| * Influences how often B-frames are used. | |||||
| * Influence how often B-frames are used. | |||||
| * - encoding: Set by user. | * - encoding: Set by user. | ||||
| * - decoding: unused | * - decoding: unused | ||||
| */ | */ | ||||
| @@ -2672,7 +2672,7 @@ typedef struct AVCodecContext { | |||||
| int mv0_threshold; | int mv0_threshold; | ||||
| /** | /** | ||||
| * Adjusts sensitivity of b_frame_strategy 1. | |||||
| * Adjust sensitivity of b_frame_strategy 1. | |||||
| * - encoding: Set by user. | * - encoding: Set by user. | ||||
| * - decoding: unused | * - decoding: unused | ||||
| */ | */ | ||||
| @@ -2956,7 +2956,7 @@ typedef struct AVCodecContext { | |||||
| #if FF_API_FLAC_GLOBAL_OPTS | #if FF_API_FLAC_GLOBAL_OPTS | ||||
| /** | /** | ||||
| * Determines which LPC analysis algorithm to use. | |||||
| * Determine which LPC analysis algorithm to use. | |||||
| * - encoding: Set by user | * - encoding: Set by user | ||||
| * - decoding: unused | * - decoding: unused | ||||
| */ | */ | ||||
| @@ -4121,7 +4121,7 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, | |||||
| AVPacket *avpkt); | AVPacket *avpkt); | ||||
| /** | /** | ||||
| * Frees all allocated data in the given subtitle struct. | |||||
| * Free all allocated data in the given subtitle struct. | |||||
| * | * | ||||
| * @param sub AVSubtitle to free. | * @param sub AVSubtitle to free. | ||||
| */ | */ | ||||
| @@ -4486,7 +4486,7 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, | |||||
| unsigned int av_xiphlacing(unsigned char *s, unsigned int v); | unsigned int av_xiphlacing(unsigned char *s, unsigned int v); | ||||
| /** | /** | ||||
| * Logs a generic warning message about a missing feature. This function is | |||||
| * Log a generic warning message about a missing feature. This function is | |||||
| * intended to be used internally by Libav (libavcodec, libavformat, etc.) | * intended to be used internally by Libav (libavcodec, libavformat, etc.) | ||||
| * only, and would normally not be used by applications. | * only, and would normally not be used by applications. | ||||
| * @param[in] avc a pointer to an arbitrary struct of which the first field is | * @param[in] avc a pointer to an arbitrary struct of which the first field is | ||||
| @@ -30,7 +30,7 @@ | |||||
| /** | /** | ||||
| * finds the end of the current frame in the bitstream. | |||||
| * Find the end of the current frame in the bitstream. | |||||
| * @return the position of the first byte of the next frame, or -1 | * @return the position of the first byte of the next frame, or -1 | ||||
| */ | */ | ||||
| static int cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, | static int cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, | ||||
| @@ -64,7 +64,7 @@ static inline int bidir_sal(int value, int offset) | |||||
| } | } | ||||
| /** | /** | ||||
| * returns the dot product. | |||||
| * Return the dot product. | |||||
| * @param a input data array | * @param a input data array | ||||
| * @param b input data array | * @param b input data array | ||||
| * @param length number of elements | * @param length number of elements | ||||
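The hunk cuts off before the body, so a minimal hedged sketch of the documented operation follows; the int16_t element type and 64-bit accumulator are assumptions, not necessarily what the library uses.

```c
#include <stdint.h>

static int64_t dot_product_sketch(const int16_t *a, const int16_t *b, int length)
{
    int64_t sum = 0;

    for (int i = 0; i < length; i++)
        sum += (int64_t)a[i] * b[i];
    return sum;
}
```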
| @@ -39,7 +39,7 @@ typedef struct DCAParseContext { | |||||
| || state == DCA_MARKER_RAW_LE || state == DCA_MARKER_RAW_BE) | || state == DCA_MARKER_RAW_LE || state == DCA_MARKER_RAW_BE) | ||||
| /** | /** | ||||
| * finds the end of the current frame in the bitstream. | |||||
| * Find the end of the current frame in the bitstream. | |||||
| * @return the position of the first byte of the next frame, or -1 | * @return the position of the first byte of the next frame, or -1 | ||||
| */ | */ | ||||
| static int dca_find_frame_end(DCAParseContext * pc1, const uint8_t * buf, | static int dca_find_frame_end(DCAParseContext * pc1, const uint8_t * buf, | ||||
| @@ -1779,7 +1779,7 @@ static void add_8x8basis_c(int16_t rem[64], int16_t basis[64], int scale){ | |||||
| } | } | ||||
| /** | /** | ||||
| * permutes an 8x8 block. | |||||
| * Permute an 8x8 block. | |||||
| * @param block the block which will be permuted according to the given permutation vector | * @param block the block which will be permuted according to the given permutation vector | ||||
| * @param permutation the permutation vector | * @param permutation the permutation vector | ||||
| * @param last the last non zero coefficient in scantable order, used to speed the permutation up | * @param last the last non zero coefficient in scantable order, used to speed the permutation up | ||||
| @@ -80,7 +80,7 @@ static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride){ | |||||
| } | } | ||||
| /** | /** | ||||
| * replaces the current MB with a flat dc only version. | |||||
| * Replace the current MB with a flat dc-only version. | |||||
| */ | */ | ||||
| static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y) | static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y) | ||||
| { | { | ||||
| @@ -711,7 +711,7 @@ void ff_er_frame_start(MpegEncContext *s){ | |||||
| } | } | ||||
| /** | /** | ||||
| * adds a slice. | |||||
| * Add a slice. | |||||
| * @param endx x component of the last macroblock, can be -1 for the last of the previous line | * @param endx x component of the last macroblock, can be -1 for the last of the previous line | ||||
| * @param status the status at the end (MV_END, AC_ERROR, ...), it is assumed that no earlier end or | * @param status the status at the end (MV_END, AC_ERROR, ...), it is assumed that no earlier end or | ||||
| * error of the same type occurred | * error of the same type occurred | ||||
| @@ -85,13 +85,13 @@ gb | |||||
| getbitcontext | getbitcontext | ||||
| OPEN_READER(name, gb) | OPEN_READER(name, gb) | ||||
| loads gb into local variables | |||||
| load gb into local variables | |||||
| CLOSE_READER(name, gb) | CLOSE_READER(name, gb) | ||||
| stores local vars in gb | |||||
| store local vars in gb | |||||
| UPDATE_CACHE(name, gb) | UPDATE_CACHE(name, gb) | ||||
| refills the internal cache from the bitstream | |||||
| refill the internal cache from the bitstream | |||||
| after this call at least MIN_CACHE_BITS will be available, | after this call at least MIN_CACHE_BITS will be available, | ||||
| GET_CACHE(name, gb) | GET_CACHE(name, gb) | ||||
| @@ -282,7 +282,7 @@ static inline unsigned int get_bits(GetBitContext *s, int n){ | |||||
| } | } | ||||
| /** | /** | ||||
| * Shows 1-25 bits. | |||||
| * Show 1-25 bits. | |||||
| */ | */ | ||||
| static inline unsigned int show_bits(GetBitContext *s, int n){ | static inline unsigned int show_bits(GetBitContext *s, int n){ | ||||
| register int tmp; | register int tmp; | ||||
| @@ -329,7 +329,7 @@ static inline void skip_bits1(GetBitContext *s){ | |||||
| } | } | ||||
| /** | /** | ||||
| * reads 0-32 bits. | |||||
| * Read 0-32 bits. | |||||
| */ | */ | ||||
| static inline unsigned int get_bits_long(GetBitContext *s, int n){ | static inline unsigned int get_bits_long(GetBitContext *s, int n){ | ||||
| if (n <= MIN_CACHE_BITS) return get_bits(s, n); | if (n <= MIN_CACHE_BITS) return get_bits(s, n); | ||||
| @@ -345,14 +345,14 @@ static inline unsigned int get_bits_long(GetBitContext *s, int n){ | |||||
| } | } | ||||
| /** | /** | ||||
| * reads 0-32 bits as a signed integer. | |||||
| * Read 0-32 bits as a signed integer. | |||||
| */ | */ | ||||
| static inline int get_sbits_long(GetBitContext *s, int n) { | static inline int get_sbits_long(GetBitContext *s, int n) { | ||||
| return sign_extend(get_bits_long(s, n), n); | return sign_extend(get_bits_long(s, n), n); | ||||
| } | } | ||||
| /** | /** | ||||
| * shows 0-32 bits. | |||||
| * Show 0-32 bits. | |||||
| */ | */ | ||||
| static inline unsigned int show_bits_long(GetBitContext *s, int n){ | static inline unsigned int show_bits_long(GetBitContext *s, int n){ | ||||
| if (n <= MIN_CACHE_BITS) return show_bits(s, n); | if (n <= MIN_CACHE_BITS) return show_bits(s, n); | ||||
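Because several hunks touch the bit-reader documentation, a short hedged usage sketch tying them together; the header layout parsed here is invented, only functions named in the surrounding documentation are used, and get_bits.h is assumed to be included.

```c
/* Hypothetical field layout, for illustration only (assumes "get_bits.h"). */
static void parse_example(const uint8_t *buf, int size_in_bits)
{
    GetBitContext gb;

    init_get_bits(&gb, buf, size_in_bits);

    unsigned version = get_bits(&gb, 4);        /* 1-25 bit read          */
    unsigned flags   = show_bits(&gb, 8);       /* peek, nothing consumed */
    unsigned id      = get_bits_long(&gb, 32);  /* 0-32 bit read          */
    int      delta   = get_sbits_long(&gb, 16); /* sign-extended read     */

    (void)version; (void)flags; (void)id; (void)delta;
}
```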
| @@ -372,7 +372,7 @@ static inline int check_marker(GetBitContext *s, const char *msg) | |||||
| } | } | ||||
| /** | /** | ||||
| * init GetBitContext. | |||||
| * Initialize GetBitContext. | ||||
| * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual read bits | * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual read bits | ||||
| * because some optimized bitstream readers read 32 or 64 bit at once and could read over the end | * because some optimized bitstream readers read 32 or 64 bit at once and could read over the end | ||||
| * @param bit_size the size of the buffer in bits | * @param bit_size the size of the buffer in bits | ||||
| @@ -434,7 +434,6 @@ void free_vlc(VLC *vlc); | |||||
| /** | /** | ||||
| * | |||||
| * If the vlc code is invalid and max_depth=1, then no bits will be removed. | * If the vlc code is invalid and max_depth=1, then no bits will be removed. | ||||
| * If the vlc code is invalid and max_depth>1, then the number of bits removed | * If the vlc code is invalid and max_depth>1, then the number of bits removed | ||||
| * is undefined. | * is undefined. | ||||
| @@ -496,7 +495,7 @@ void free_vlc(VLC *vlc); | |||||
| /** | /** | ||||
| * parses a vlc code, faster than get_vlc() | |||||
| * Parse a vlc code, faster than get_vlc(). | |||||
| * @param bits is the number of bits which will be read at once, must be | * @param bits is the number of bits which will be read at once, must be | ||||
| * identical to nb_bits in init_vlc() | * identical to nb_bits in init_vlc() | ||||
| * @param max_depth is the number of times bits bits must be read to completely | * @param max_depth is the number of times bits bits must be read to completely | ||||
| @@ -97,7 +97,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx){ | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the group of blocks header or slice header. | |||||
| * Decode the group of blocks header or slice header. | |||||
| * @return <0 if an error occurred | * @return <0 if an error occurred | ||||
| */ | */ | ||||
| static int h261_decode_gob_header(H261Context *h){ | static int h261_decode_gob_header(H261Context *h){ | ||||
| @@ -150,7 +150,7 @@ static int h261_decode_gob_header(H261Context *h){ | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the group of blocks / video packet header. | |||||
| * Decode the group of blocks / video packet header. | |||||
| * @return <0 if no resync found | * @return <0 if no resync found | ||||
| */ | */ | ||||
| static int ff_h261_resync(H261Context *h){ | static int ff_h261_resync(H261Context *h){ | ||||
| @@ -191,7 +191,7 @@ static int ff_h261_resync(H261Context *h){ | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes skipped macroblocks | |||||
| * Decode skipped macroblocks. | |||||
| * @return 0 | * @return 0 | ||||
| */ | */ | ||||
| static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 ) | static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 ) | ||||
| @@ -355,7 +355,7 @@ intra: | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes a macroblock | |||||
| * Decode a macroblock. | |||||
| * @return <0 if an error occurred | * @return <0 if an error occurred | ||||
| */ | */ | ||||
| static int h261_decode_block(H261Context * h, DCTELEM * block, | static int h261_decode_block(H261Context * h, DCTELEM * block, | ||||
| @@ -437,7 +437,7 @@ static int h261_decode_block(H261Context * h, DCTELEM * block, | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the H261 picture header. | |||||
| * Decode the H.261 picture header. | |||||
| * @return <0 if no startcode found | * @return <0 if no startcode found | ||||
| */ | */ | ||||
| static int h261_decode_picture_header(H261Context *h){ | static int h261_decode_picture_header(H261Context *h){ | ||||
| @@ -251,7 +251,7 @@ void ff_h261_encode_init(MpegEncContext *s){ | |||||
| /** | /** | ||||
| * encodes a 8x8 block. | |||||
| * Encode an 8x8 block. | |||||
| * @param block the 8x8 block | * @param block the 8x8 block | ||||
| * @param n block index (0-3 are luma, 4-5 are chroma) | * @param n block index (0-3 are luma, 4-5 are chroma) | ||||
| */ | */ | ||||
| @@ -127,7 +127,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx) | |||||
| } | } | ||||
| /** | /** | ||||
| * returns the number of bytes consumed for building the current frame | |||||
| * Return the number of bytes consumed for building the current frame. | |||||
| */ | */ | ||||
| static int get_consumed_bytes(MpegEncContext *s, int buf_size){ | static int get_consumed_bytes(MpegEncContext *s, int buf_size){ | ||||
| int pos= (get_bits_count(&s->gb)+7)>>3; | int pos= (get_bits_count(&s->gb)+7)>>3; | ||||
| @@ -62,7 +62,8 @@ static const enum PixelFormat hwaccel_pixfmt_list_h264_jpeg_420[] = { | |||||
| }; | }; | ||||
| /** | /** | ||||
| * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks. | |||||
| * Check if the top & left blocks are available if needed and | |||||
| * change the dc mode so it only uses the available blocks. | |||||
| */ | */ | ||||
| int ff_h264_check_intra4x4_pred_mode(H264Context *h){ | int ff_h264_check_intra4x4_pred_mode(H264Context *h){ | ||||
| MpegEncContext * const s = &h->s; | MpegEncContext * const s = &h->s; | ||||
| @@ -101,7 +102,8 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){ | |||||
| } //FIXME cleanup like ff_h264_check_intra_pred_mode | } //FIXME cleanup like ff_h264_check_intra_pred_mode | ||||
| /** | /** | ||||
| * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks. | |||||
| * Check if the top & left blocks are available if needed and | |||||
| * change the dc mode so it only uses the available blocks. | |||||
| */ | */ | ||||
| int ff_h264_check_intra_pred_mode(H264Context *h, int mode){ | int ff_h264_check_intra_pred_mode(H264Context *h, int mode){ | ||||
| MpegEncContext * const s = &h->s; | MpegEncContext * const s = &h->s; | ||||
| @@ -2592,7 +2594,7 @@ static void clone_slice(H264Context *dst, H264Context *src) | |||||
| } | } | ||||
| /** | /** | ||||
| * computes profile from profile_idc and constraint_set?_flags | |||||
| * Compute profile from profile_idc and constraint_set?_flags. | |||||
| * | * | ||||
| * @param sps SPS | * @param sps SPS | ||||
| * | * | ||||
| @@ -2619,7 +2621,7 @@ int ff_h264_get_profile(SPS *sps) | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes a slice header. | |||||
| * Decode a slice header. | |||||
| * This will also call MPV_common_init() and frame_start() as needed. | * This will also call MPV_common_init() and frame_start() as needed. | ||||
| * | * | ||||
| * @param h h264context | * @param h h264context | ||||
| @@ -3982,7 +3984,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ | |||||
| } | } | ||||
| /** | /** | ||||
| * returns the number of bytes consumed for building the current frame | |||||
| * Return the number of bytes consumed for building the current frame. | |||||
| */ | */ | ||||
| static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size){ | static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size){ | ||||
| if(pos==0) pos=1; //avoid infinite loops (i doubt that is needed but ...) | if(pos==0) pos=1; //avoid infinite loops (i doubt that is needed but ...) | ||||
| @@ -762,14 +762,14 @@ static av_always_inline uint16_t pack8to16(int a, int b){ | |||||
| } | } | ||||
| /** | /** | ||||
| * gets the chroma qp. | |||||
| * Get the chroma qp. | |||||
| */ | */ | ||||
| static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale){ | static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale){ | ||||
| return h->pps.chroma_qp_table[t][qscale]; | return h->pps.chroma_qp_table[t][qscale]; | ||||
| } | } | ||||
| /** | /** | ||||
| * gets the predicted intra4x4 prediction mode. | |||||
| * Get the predicted intra4x4 prediction mode. | |||||
| */ | */ | ||||
| static av_always_inline int pred_intra_mode(H264Context *h, int n){ | static av_always_inline int pred_intra_mode(H264Context *h, int n){ | ||||
| const int index8= scan8[n]; | const int index8= scan8[n]; | ||||
| @@ -1862,7 +1862,7 @@ static av_always_inline void decode_cabac_luma_residual( H264Context *h, const u | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes a macroblock | |||||
| * Decode a macroblock. | |||||
| * @return 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed | * @return 0 if OK, AC_ERROR / DC_ERROR / MV_ERROR if an error is noticed | ||||
| */ | */ | ||||
| int ff_h264_decode_mb_cabac(H264Context *h) { | int ff_h264_decode_mb_cabac(H264Context *h) { | ||||
| @@ -281,7 +281,7 @@ static int8_t cavlc_level_tab[7][1<<LEVEL_TAB_BITS][2]; | |||||
| #define RUN7_VLC_BITS 6 | #define RUN7_VLC_BITS 6 | ||||
| /** | /** | ||||
| * gets the predicted number of non-zero coefficients. | |||||
| * Get the predicted number of non-zero coefficients. | |||||
| * @param n block index | * @param n block index | ||||
| */ | */ | ||||
| static inline int pred_non_zero_count(H264Context *h, int n){ | static inline int pred_non_zero_count(H264Context *h, int n){ | ||||
| @@ -436,7 +436,7 @@ static inline int get_level_prefix(GetBitContext *gb){ | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes a residual block. | |||||
| * Decode a residual block. | |||||
| * @param n block index | * @param n block index | ||||
| * @param scantable scantable | * @param scantable scantable | ||||
| * @param max_coeff number of coefficients in the block | * @param max_coeff number of coefficients in the block | ||||
| @@ -86,7 +86,7 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, | |||||
| } | } | ||||
| /** | /** | ||||
| * gets the predicted MV. | |||||
| * Get the predicted MV. | |||||
| * @param n the block index | * @param n the block index | ||||
| * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4) | * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4) | ||||
| * @param mx the x component of the predicted motion vector | * @param mx the x component of the predicted motion vector | ||||
| @@ -142,7 +142,7 @@ static av_always_inline void pred_motion(H264Context * const h, int n, int part_ | |||||
| } | } | ||||
| /** | /** | ||||
| * gets the directionally predicted 16x8 MV. | |||||
| * Get the directionally predicted 16x8 MV. | |||||
| * @param n the block index | * @param n the block index | ||||
| * @param mx the x component of the predicted motion vector | * @param mx the x component of the predicted motion vector | ||||
| * @param my the y component of the predicted motion vector | * @param my the y component of the predicted motion vector | ||||
| @@ -177,7 +177,7 @@ static av_always_inline void pred_16x8_motion(H264Context * const h, int n, int | |||||
| } | } | ||||
| /** | /** | ||||
| * gets the directionally predicted 8x16 MV. | |||||
| * Get the directionally predicted 8x16 MV. | |||||
| * @param n the block index | * @param n the block index | ||||
| * @param mx the x component of the predicted motion vector | * @param mx the x component of the predicted motion vector | ||||
| * @param my the y component of the predicted motion vector | * @param my the y component of the predicted motion vector | ||||
| @@ -148,7 +148,7 @@ int ff_h263_decode_mba(MpegEncContext *s) | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the group of blocks header or slice header. | |||||
| * Decode the group of blocks header or slice header. | |||||
| * @return <0 if an error occurred | * @return <0 if an error occurred | ||||
| */ | */ | ||||
| static int h263_decode_gob_header(MpegEncContext *s) | static int h263_decode_gob_header(MpegEncContext *s) | ||||
| @@ -203,7 +203,7 @@ static int h263_decode_gob_header(MpegEncContext *s) | |||||
| } | } | ||||
| /** | /** | ||||
| * finds the next resync_marker | |||||
| * Find the next resync_marker. | |||||
| * @param p pointer to buffer to scan | * @param p pointer to buffer to scan | ||||
| * @param end pointer to the end of the buffer | * @param end pointer to the end of the buffer | ||||
| * @return pointer to the next resync_marker, or end if none was found | * @return pointer to the next resync_marker, or end if none was found | ||||
| @@ -224,7 +224,7 @@ const uint8_t *ff_h263_find_resync_marker(const uint8_t *restrict p, const uint8 | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the group of blocks / video packet header. | |||||
| * Decode the group of blocks / video packet header. | |||||
| * @return bit position of the resync_marker, or <0 if none was found | * @return bit position of the resync_marker, or <0 if none was found | ||||
| */ | */ | ||||
| int ff_h263_resync(MpegEncContext *s){ | int ff_h263_resync(MpegEncContext *s){ | ||||
| @@ -306,7 +306,7 @@ int h263_decode_motion(MpegEncContext * s, int pred, int f_code) | |||||
| } | } | ||||
| /* Decodes RVLC of H.263+ UMV */ | |||||
| /* Decode RVLC of H.263+ UMV */ | |||||
| static int h263p_decode_umotion(MpegEncContext * s, int pred) | static int h263p_decode_umotion(MpegEncContext * s, int pred) | ||||
| { | { | ||||
| int code = 0, sign; | int code = 0, sign; | ||||
| @@ -302,7 +302,7 @@ void ff_clean_h263_qscales(MpegEncContext *s){ | |||||
| static const int dquant_code[5]= {1,0,9,2,3}; | static const int dquant_code[5]= {1,0,9,2,3}; | ||||
| /** | /** | ||||
| * encodes a 8x8 block. | |||||
| * Encode an 8x8 block. | |||||
| * @param block the 8x8 block | * @param block the 8x8 block | ||||
| * @param n block index (0-3 are luma, 4-5 are chroma) | * @param n block index (0-3 are luma, 4-5 are chroma) | ||||
| */ | */ | ||||
| @@ -36,7 +36,7 @@ typedef struct LATMParseContext{ | |||||
| } LATMParseContext; | } LATMParseContext; | ||||
| /** | /** | ||||
| * finds the end of the current frame in the bitstream. | |||||
| * Find the end of the current frame in the bitstream. | |||||
| * @return the position of the first byte of the next frame, or -1 | * @return the position of the first byte of the next frame, or -1 | ||||
| */ | */ | ||||
| static int latm_find_frame_end(AVCodecParserContext *s1, const uint8_t *buf, | static int latm_find_frame_end(AVCodecParserContext *s1, const uint8_t *buf, | ||||
| @@ -30,7 +30,7 @@ | |||||
| /** | /** | ||||
| * finds the end of the current frame in the bitstream. | |||||
| * Find the end of the current frame in the bitstream. | |||||
| * @return the position of the first byte of the next frame, or -1 | * @return the position of the first byte of the next frame, or -1 | ||||
| */ | */ | ||||
| static int find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size){ | static int find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size){ | ||||
| @@ -1619,9 +1619,10 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) | |||||
| #define DECODE_SLICE_OK 0 | #define DECODE_SLICE_OK 0 | ||||
| /** | /** | ||||
| * decodes a slice. MpegEncContext.mb_y must be set to the MB row from the startcode | |||||
| * @return DECODE_SLICE_ERROR if the slice is damaged<br> | |||||
| * DECODE_SLICE_OK if this slice is ok<br> | |||||
| * Decode a slice. | |||||
| * MpegEncContext.mb_y must be set to the MB row from the startcode. | |||||
| * @return DECODE_SLICE_ERROR if the slice is damaged, | |||||
| * DECODE_SLICE_OK if this slice is OK | |||||
| */ | */ | ||||
| static int mpeg_decode_slice(MpegEncContext *s, int mb_y, | static int mpeg_decode_slice(MpegEncContext *s, int mb_y, | ||||
| const uint8_t **buf, int buf_size) | const uint8_t **buf, int buf_size) | ||||
| @@ -119,7 +119,7 @@ extern uint8_t ff_mpeg4_static_rl_table_store[3][2][2*MAX_RUN + MAX_LEVEL + 3]; | |||||
| /** | /** | ||||
| * predicts the dc. | |||||
| * Predict the dc. | |||||
| * encoding quantized level -> quantized diff | * encoding quantized level -> quantized diff | ||||
| * decoding quantized diff -> quantized level | * decoding quantized diff -> quantized level | ||||
| * @param n block index (0-3 are luma, 4-5 are chroma) | * @param n block index (0-3 are luma, 4-5 are chroma) | ||||
| @@ -26,7 +26,7 @@ | |||||
| #include "parser.h" | #include "parser.h" | ||||
| /** | /** | ||||
| * finds the end of the current frame in the bitstream. | |||||
| * Find the end of the current frame in the bitstream. | |||||
| * @return the position of the first byte of the next frame, or -1 | * @return the position of the first byte of the next frame, or -1 | ||||
| */ | */ | ||||
| int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size); | int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size); | ||||
| @@ -46,7 +46,7 @@ static const int mb_type_b_map[4]= { | |||||
| }; | }; | ||||
| /** | /** | ||||
| * predicts the ac. | |||||
| * Predict the ac. | |||||
| * @param n block index (0-3 are luma, 4-5 are chroma) | * @param n block index (0-3 are luma, 4-5 are chroma) | ||||
| * @param dir the ac prediction direction | * @param dir the ac prediction direction | ||||
| */ | */ | ||||
| @@ -343,7 +343,7 @@ static void mpeg4_decode_sprite_trajectory(MpegEncContext * s, GetBitContext *gb | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the next video packet. | |||||
| * Decode the next video packet. | |||||
| * @return <0 if something went wrong | * @return <0 if something went wrong | ||||
| */ | */ | ||||
| int mpeg4_decode_video_packet_header(MpegEncContext *s) | int mpeg4_decode_video_packet_header(MpegEncContext *s) | ||||
| @@ -439,7 +439,7 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s) | |||||
| } | } | ||||
| /** | /** | ||||
| * gets the average motion vector for a GMC MB. | |||||
| * Get the average motion vector for a GMC MB. | |||||
| * @param n either 0 for the x component or 1 for y | * @param n either 0 for the x component or 1 for y | ||||
| * @return the average MV for a GMC MB | * @return the average MV for a GMC MB | ||||
| */ | */ | ||||
| @@ -485,7 +485,7 @@ static inline int get_amv(MpegEncContext *s, int n){ | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the dc value. | |||||
| * Decode the dc value. | |||||
| * @param n block index (0-3 are luma, 4-5 are chroma) | * @param n block index (0-3 are luma, 4-5 are chroma) | ||||
| * @param dir_ptr the prediction direction will be stored here | * @param dir_ptr the prediction direction will be stored here | ||||
| * @return the quantized dc | * @return the quantized dc | ||||
| @@ -532,7 +532,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr) | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes first partition. | |||||
| * Decode first partition. | |||||
| * @return number of MBs decoded or <0 if an error occurred | * @return number of MBs decoded or <0 if an error occurred | ||||
| */ | */ | ||||
| static int mpeg4_decode_partition_a(MpegEncContext *s){ | static int mpeg4_decode_partition_a(MpegEncContext *s){ | ||||
| @@ -784,7 +784,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the first & second partition | |||||
| * Decode the first and second partition. | |||||
| * @return <0 if error (and sets error type in the error_status_table) | * @return <0 if error (and sets error type in the error_status_table) | ||||
| */ | */ | ||||
| int ff_mpeg4_decode_partitions(MpegEncContext *s) | int ff_mpeg4_decode_partitions(MpegEncContext *s) | ||||
| @@ -837,7 +837,7 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s) | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes a block. | |||||
| * Decode a block. | |||||
| * @return <0 if an error occurred | * @return <0 if an error occurred | ||||
| */ | */ | ||||
| static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, | static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, | ||||
| @@ -1824,7 +1824,7 @@ no_cplx_est: | |||||
| } | } | ||||
| /** | /** | ||||
| * decodes the user data stuff in the header. | |||||
| * Decode the user data stuff in the header. | |||||
| * Also initializes divx/xvid/lavc_version/build. | * Also initializes divx/xvid/lavc_version/build. | ||||
| */ | */ | ||||
| static int decode_user_data(MpegEncContext *s, GetBitContext *gb){ | static int decode_user_data(MpegEncContext *s, GetBitContext *gb){ | ||||
| @@ -2094,7 +2094,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ | |||||
| } | } | ||||
| /** | /** | ||||
| * decode mpeg4 headers | |||||
| * Decode mpeg4 headers. | |||||
| * @return <0 if no VOP found (or a damaged one) | * @return <0 if no VOP found (or a damaged one) | ||||
| * FRAME_SKIPPED if a not coded VOP is found | * FRAME_SKIPPED if a not coded VOP is found | ||||
| * 0 if a VOP is found | * 0 if a VOP is found | ||||
| @@ -238,7 +238,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){ | |||||
| /** | /** | ||||
| * encodes the dc value. | |||||
| * Encode the dc value. | |||||
| * @param n block index (0-3 are luma, 4-5 are chroma) | * @param n block index (0-3 are luma, 4-5 are chroma) | ||||
| */ | */ | ||||
| static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n) | static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n) | ||||
| @@ -291,7 +291,7 @@ static inline int mpeg4_get_dc_length(int level, int n){ | |||||
| } | } | ||||
| /** | /** | ||||
| * encodes a 8x8 block | |||||
| * Encode an 8x8 block. | |||||
| * @param n block index (0-3 are luma, 4-5 are chroma) | * @param n block index (0-3 are luma, 4-5 are chroma) | ||||
| */ | */ | ||||
| static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n, int intra_dc, | static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n, int intra_dc, | ||||
| @@ -289,7 +289,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) | |||||
| } | } | ||||
| /** | /** | ||||
| * allocates a Picture | |||||
| * Allocate a Picture. | |||||
| * The pixels are allocated/set by calling get_buffer() if shared = 0 | * The pixels are allocated/set by calling get_buffer() if shared = 0 | ||||
| */ | */ | ||||
| int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared) | int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared) | ||||
| @@ -388,7 +388,7 @@ fail: // for the FF_ALLOCZ_OR_GOTO macro | |||||
| } | } | ||||
| /** | /** | ||||
| * deallocates a picture | |||||
| * Deallocate a picture. | |||||
| */ | */ | ||||
| static void free_picture(MpegEncContext *s, Picture *pic) | static void free_picture(MpegEncContext *s, Picture *pic) | ||||
| { | { | ||||
| @@ -624,9 +624,9 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, | |||||
| } | } | ||||
| /** | /** | ||||
| * sets the given MpegEncContext to common defaults | |||||
| * Set the given MpegEncContext to common defaults | |||||
| * (same for encoding and decoding). | * (same for encoding and decoding). | ||||
| * the changed fields will not depend upon the | |||||
| * The changed fields will not depend upon the | |||||
| * prior state of the MpegEncContext. | * prior state of the MpegEncContext. | ||||
| */ | */ | ||||
| void MPV_common_defaults(MpegEncContext *s) | void MPV_common_defaults(MpegEncContext *s) | ||||
| @@ -652,7 +652,7 @@ void MPV_common_defaults(MpegEncContext *s) | |||||
| } | } | ||||
| /** | /** | ||||
| * sets the given MpegEncContext to defaults for decoding. | |||||
| * Set the given MpegEncContext to defaults for decoding. | |||||
| * the changed fields will not depend upon | * the changed fields will not depend upon | ||||
| * the prior state of the MpegEncContext. | * the prior state of the MpegEncContext. | ||||
| */ | */ | ||||
| @@ -1400,7 +1400,7 @@ void MPV_frame_end(MpegEncContext *s) | |||||
| } | } | ||||
| /** | /** | ||||
| * draws an line from (ex, ey) -> (sx, sy). | |||||
| * Draw a line from (ex, ey) -> (sx, sy). | |||||
| * @param w width of the image | * @param w width of the image | ||||
| * @param h height of the image | * @param h height of the image | ||||
| * @param stride stride/linesize of the image | * @param stride stride/linesize of the image | ||||
| @@ -1449,7 +1449,7 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h | |||||
| } | } | ||||
| /** | /** | ||||
| * draws an arrow from (ex, ey) -> (sx, sy). | |||||
| * Draw an arrow from (ex, ey) -> (sx, sy). | |||||
| * @param w width of the image | * @param w width of the image | ||||
| * @param h height of the image | * @param h height of the image | ||||
| * @param stride stride/linesize of the image | * @param stride stride/linesize of the image | ||||
| @@ -1482,7 +1482,7 @@ static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int | |||||
| } | } | ||||
| /** | /** | ||||
| * prints debuging info for the given picture. | |||||
| * Print debugging info for the given picture. | ||||
| */ | */ | ||||
| void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ | void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ | ||||
| @@ -2127,7 +2127,7 @@ static inline void add_dequant_dct(MpegEncContext *s, | |||||
| } | } | ||||
| /** | /** | ||||
| * cleans dc, ac, coded_block for the current non intra MB | |||||
| * Clean dc, ac, coded_block for the current non-intra MB. | |||||
| */ | */ | ||||
| void ff_clean_intra_table_entries(MpegEncContext *s) | void ff_clean_intra_table_entries(MpegEncContext *s) | ||||
| { | { | ||||
| @@ -2432,7 +2432,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){ | |||||
| } | } | ||||
| /** | /** | ||||
| * | |||||
| * @param h is the normal height, this will be reduced automatically if needed for the last row | * @param h is the normal height, this will be reduced automatically if needed for the last row | ||||
| */ | */ | ||||
| void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ | void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ | ||||
| @@ -728,8 +728,8 @@ void ff_init_block_index(MpegEncContext *s); | |||||
| void ff_copy_picture(Picture *dst, Picture *src); | void ff_copy_picture(Picture *dst, Picture *src); | ||||
| /** | /** | ||||
| * allocates a Picture | |||||
| * The pixels are allocated/set by calling get_buffer() if shared=0 | |||||
| * Allocate a Picture. | |||||
| * The pixels are allocated/set by calling get_buffer() if shared = 0. | |||||
| */ | */ | ||||
| int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared); | int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared); | ||||
| @@ -42,14 +42,14 @@ | |||||
| int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); | int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); | ||||
| /** | /** | ||||
| * allocates a Picture | |||||
| * The pixels are allocated/set by calling get_buffer() if shared=0 | |||||
| * Allocate a Picture. | |||||
| * The pixels are allocated/set by calling get_buffer() if shared = 0. | |||||
| */ | */ | ||||
| int alloc_picture(MpegEncContext *s, Picture *pic, int shared); | int alloc_picture(MpegEncContext *s, Picture *pic, int shared); | ||||
| /** | /** | ||||
| * sets the given MpegEncContext to common defaults (same for encoding and decoding). | |||||
| * the changed fields will not depend upon the prior state of the MpegEncContext. | |||||
| * Set the given MpegEncContext to common defaults (same for encoding and decoding). | |||||
| * The changed fields will not depend upon the prior state of the MpegEncContext. | |||||
| */ | */ | ||||
| void MPV_common_defaults(MpegEncContext *s); | void MPV_common_defaults(MpegEncContext *s); | ||||
| @@ -227,7 +227,7 @@ static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContex | |||||
| } | } | ||||
| /** | /** | ||||
| * sets the given MpegEncContext to defaults for encoding. | |||||
| * Set the given MpegEncContext to defaults for encoding. | |||||
| * the changed fields will not depend upon the prior state of the MpegEncContext. | * the changed fields will not depend upon the prior state of the MpegEncContext. | ||||
| */ | */ | ||||
| static void MPV_encode_defaults(MpegEncContext *s){ | static void MPV_encode_defaults(MpegEncContext *s){ | ||||
| @@ -216,7 +216,7 @@ void av_parser_close(AVCodecParserContext *s) | |||||
| /*****************************************************/ | /*****************************************************/ | ||||
| /** | /** | ||||
| * combines the (truncated) bitstream to a complete frame | |||||
| * Combine the (truncated) bitstream to a complete frame. | |||||
| * @return -1 if no complete frame could be created, AVERROR(ENOMEM) if there was a memory allocation error | * @return -1 if no complete frame could be created, AVERROR(ENOMEM) if there was a memory allocation error | ||||
| */ | */ | ||||
| int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size) | int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size) | ||||
| @@ -322,7 +322,7 @@ static attribute_align_arg void *frame_worker_thread(void *arg) | |||||
| } | } | ||||
| /** | /** | ||||
| * Updates the next thread's AVCodecContext with values from the reference thread's context. | |||||
| * Update the next thread's AVCodecContext with values from the reference thread's context. | |||||
| * | * | ||||
| * @param dst The destination context. | * @param dst The destination context. | ||||
| * @param src The source context. | * @param src The source context. | ||||
| @@ -300,7 +300,7 @@ int ff_vbv_update(MpegEncContext *s, int frame_size){ | |||||
| } | } | ||||
| /** | /** | ||||
| * modifies the bitrate curve from pass1 for one frame | |||||
| * Modify the bitrate curve from pass1 for one frame. | |||||
| */ | */ | ||||
| static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num){ | static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num){ | ||||
| RateControlContext *rcc= &s->rc_context; | RateControlContext *rcc= &s->rc_context; | ||||
| @@ -404,7 +404,7 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl | |||||
| } | } | ||||
| /** | /** | ||||
| * gets the qmin & qmax for pict_type | |||||
| * Get the qmin & qmax for pict_type. | |||||
| */ | */ | ||||
| static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type){ | static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type){ | ||||
| int qmin= s->avctx->lmin; | int qmin= s->avctx->lmin; | ||||
| @@ -90,7 +90,7 @@ static double bessel(double x){ | |||||
| } | } | ||||
| /** | /** | ||||
| * builds a polyphase filterbank. | |||||
| * Build a polyphase filterbank. | |||||
| * @param factor resampling factor | * @param factor resampling factor | ||||
| * @param scale wanted sum of coefficients for each filter | * @param scale wanted sum of coefficients for each filter | ||||
| * @param type 0->cubic, 1->blackman nuttall windowed sinc, 2..16->kaiser windowed sinc beta=2..16 | * @param type 0->cubic, 1->blackman nuttall windowed sinc, 2..16->kaiser windowed sinc beta=2..16 | ||||
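To make the parameter descriptions above concrete, a hedged sketch of computing one phase of windowed-sinc taps; the cutoff handling and the missing normalization to the wanted coefficient sum are simplifications, and only the standard Blackman-Nuttall window coefficients are used (assumes ntaps >= 2).

```c
#include <math.h>

static void windowed_sinc_sketch(float *taps, int ntaps, double cutoff)
{
    double center = (ntaps - 1) / 2.0;

    for (int k = 0; k < ntaps; k++) {
        double x    = M_PI * (k - center) * cutoff;
        double sinc = x == 0.0 ? 1.0 : sin(x) / x;
        /* Blackman-Nuttall window (textbook coefficients) */
        double w    = 0.3635819
                    - 0.4891775 * cos(2 * M_PI * k / (ntaps - 1))
                    + 0.1365995 * cos(4 * M_PI * k / (ntaps - 1))
                    - 0.0106411 * cos(6 * M_PI * k / (ntaps - 1));
        taps[k] = (float)(cutoff * sinc * w);
    }
}
```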
| @@ -31,15 +31,15 @@ | |||||
| #include "avcodec.h" | #include "avcodec.h" | ||||
| /** | /** | ||||
| * Waits for decoding threads to finish and resets internal | |||||
| * state. Called by avcodec_flush_buffers(). | |||||
| * Wait for decoding threads to finish and reset internal state. | |||||
| * Called by avcodec_flush_buffers(). | |||||
| * | * | ||||
| * @param avctx The context. | * @param avctx The context. | ||||
| */ | */ | ||||
| void ff_thread_flush(AVCodecContext *avctx); | void ff_thread_flush(AVCodecContext *avctx); | ||||
| /** | /** | ||||
| * Submits a new frame to a decoding thread. | |||||
| * Submit a new frame to a decoding thread. | |||||
| * Returns the next available frame in picture. *got_picture_ptr | * Returns the next available frame in picture. *got_picture_ptr | ||||
| * will be 0 if none is available. | * will be 0 if none is available. | ||||
| * The return value on success is the size of the consumed packet for | * The return value on success is the size of the consumed packet for | ||||
| @@ -62,8 +62,7 @@ int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, | |||||
| void ff_thread_finish_setup(AVCodecContext *avctx); | void ff_thread_finish_setup(AVCodecContext *avctx); | ||||
| /** | /** | ||||
| * Notifies later decoding threads when part of their reference picture | |||||
| * is ready. | |||||
| * Notify later decoding threads when part of their reference picture is ready. | |||||
| * Call this when some part of the picture is finished decoding. | * Call this when some part of the picture is finished decoding. | ||||
| * Later calls with lower values of progress have no effect. | * Later calls with lower values of progress have no effect. | ||||
| * | * | ||||
| @@ -75,7 +74,7 @@ void ff_thread_finish_setup(AVCodecContext *avctx); | |||||
| void ff_thread_report_progress(AVFrame *f, int progress, int field); | void ff_thread_report_progress(AVFrame *f, int progress, int field); | ||||
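To make the report/await contract concrete, here is a hedged sketch of how a frame-threaded decoder typically pairs the two calls; the macroblock-row granularity and the frame arguments are illustrative rather than taken from a specific decoder.

    #include "thread.h"

    /* cur is the frame this thread is producing, ref a reference frame owned
     * by an earlier thread; mb_row is the row just finished / about to be read. */
    static void progress_example(AVFrame *cur, AVFrame *ref, int mb_row)
    {
        /* Producer side: rows [0, mb_row] of cur are now safe to read. */
        ff_thread_report_progress(cur, mb_row, 0);

        /* Consumer side: block until ref has reached at least mb_row
         * (field 0 here; interlaced decoders pass 0 or 1). */
        ff_thread_await_progress(ref, mb_row, 0);
    }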
| /** | /** | ||||
| * Waits for earlier decoding threads to finish reference pictures | |||||
| * Wait for earlier decoding threads to finish reference pictures. | |||||
| * Call this before accessing some part of a picture, with a given | * Call this before accessing some part of a picture, with a given | ||||
| * value for progress, and it will return after the responsible decoding | * value for progress, and it will return after the responsible decoding | ||||
| * thread calls ff_thread_report_progress() with the same or | * thread calls ff_thread_report_progress() with the same or | ||||
| @@ -96,7 +96,7 @@ static void vc1_extract_headers(AVCodecParserContext *s, AVCodecContext *avctx, | |||||
| } | } | ||||
| /** | /** | ||||
| * finds the end of the current frame in the bitstream. | |||||
| * Find the end of the current frame in the bitstream. | |||||
| * @return the position of the first byte of the next frame, or -1 | * @return the position of the first byte of the next frame, or -1 | ||||
| */ | */ | ||||
| static int vc1_find_frame_end(ParseContext *pc, const uint8_t *buf, | static int vc1_find_frame_end(ParseContext *pc, const uint8_t *buf, | ||||
| @@ -708,7 +708,7 @@ int avfilter_request_frame(AVFilterLink *link); | |||||
| int avfilter_poll_frame(AVFilterLink *link); | int avfilter_poll_frame(AVFilterLink *link); | ||||
| /** | /** | ||||
| * Notifie the next filter of the start of a frame. | |||||
| * Notify the next filter of the start of a frame. | |||||
| * | * | ||||
| * @param link the output link the frame will be sent over | * @param link the output link the frame will be sent over | ||||
| * @param picref A reference to the frame about to be sent. The data for this | * @param picref A reference to the frame about to be sent. The data for this | ||||
| @@ -516,7 +516,7 @@ typedef struct AVInputFormat { | |||||
| int stream_index, int64_t timestamp, int flags); | int stream_index, int64_t timestamp, int flags); | ||||
| #endif | #endif | ||||
| /** | /** | ||||
| * Gets the next timestamp in stream[stream_index].time_base units. | |||||
| * Get the next timestamp in stream[stream_index].time_base units. | |||||
| * @return the timestamp or AV_NOPTS_VALUE if an error occurred | * @return the timestamp or AV_NOPTS_VALUE if an error occurred | ||||
| */ | */ | ||||
| int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, | int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, | ||||
| @@ -52,8 +52,8 @@ struct PayloadContext { | |||||
| }; | }; | ||||
| /** | /** | ||||
| * Parses configuration (basically the codec-specific extradata) from | |||||
| * a RTP config subpacket (starts with 0xff). | |||||
| * Parse configuration (basically the codec-specific extradata) from | |||||
| * an RTP config subpacket (starts with 0xff). | |||||
| * | * | ||||
| * Layout of the config subpacket (in bytes): | * Layout of the config subpacket (in bytes): | ||||
| * 1: 0xFF <- config ID | * 1: 0xFF <- config ID | ||||
| @@ -128,7 +128,7 @@ static int qdm2_parse_config(PayloadContext *qdm, AVStream *st, | |||||
| } | } | ||||
| /** | /** | ||||
| * Parses a single subpacket. We store this subpacket in an intermediate | |||||
| * Parse a single subpacket. We store this subpacket in an intermediate | |||||
| * buffer (position depends on the ID (byte[0]). When called, at least | * buffer (position depends on the ID (byte[0]). When called, at least | ||||
| * 4 bytes are available for reading (see qdm2_parse_packet()). | * 4 bytes are available for reading (see qdm2_parse_packet()). | ||||
| * | * | ||||
| @@ -179,7 +179,7 @@ static int qdm2_parse_subpacket(PayloadContext *qdm, AVStream *st, | |||||
| } | } | ||||
| /** | /** | ||||
| * Adds a superblock header around a set of subpackets. | |||||
| * Add a superblock header around a set of subpackets. | |||||
| * | * | ||||
| * @return <0 on error, else 0. | * @return <0 on error, else 0. | ||||
| */ | */ | ||||
| @@ -186,7 +186,7 @@ enum RTSPClientState { | |||||
| }; | }; | ||||
| /** | /** | ||||
| * Identifies particular servers that require special handling, such as | |||||
| * Identify particular servers that require special handling, such as | |||||
| * standards-incompliant "Transport:" lines in the SETUP request. | * standards-incompliant "Transport:" lines in the SETUP request. | ||||
| */ | */ | ||||
| enum RTSPServerType { | enum RTSPServerType { | ||||
| @@ -366,7 +366,7 @@ typedef struct RTSPState { | |||||
| source address and port. */ | source address and port. */ | ||||
| /** | /** | ||||
| * Describes a single stream, as identified by a single m= line block in the | |||||
| * Describe a single stream, as identified by a single m= line block in the | |||||
| * SDP content. In the case of RDT, one RTSPStream can represent multiple | * SDP content. In the case of RDT, one RTSPStream can represent multiple | ||||
| * AVStreams. In this case, each AVStream in this set has similar content | * AVStreams. In this case, each AVStream in this set has similar content | ||||
| * (but different codec/bitrate). | * (but different codec/bitrate). | ||||
| @@ -39,7 +39,7 @@ int ff_vorbiscomment_length(AVDictionary *m, const char *vendor_string, | |||||
| unsigned *count); | unsigned *count); | ||||
| /** | /** | ||||
| * Writes a VorbisComment into a buffer. The buffer, p, must have enough | |||||
| * Write a VorbisComment into a buffer. The buffer, p, must have enough | |||||
| * data to hold the whole VorbisComment. The minimum size required can be | * data to hold the whole VorbisComment. The minimum size required can be | ||||
| * obtained by passing the same AVDictionary and vendor_string to | * obtained by passing the same AVDictionary and vendor_string to | ||||
| * ff_vorbiscomment_length() | * ff_vorbiscomment_length() | ||||
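The intended two-step usage (measure, then write) can be sketched as below. The exact ff_vorbiscomment_write() signature shown is an assumption based on how muxers of this period call it, so treat the sketch as illustrative rather than authoritative.

    #include "libavutil/mem.h"
    #include "vorbiscomment.h"

    /* Hedged sketch: size the comment block, allocate, then serialize. */
    static uint8_t *pack_vorbiscomment(AVDictionary *m, const char *vendor,
                                       int *out_len)
    {
        unsigned count;
        int      len = ff_vorbiscomment_length(m, vendor, &count);
        uint8_t *buf = av_malloc(len), *p = buf;

        if (!buf)
            return NULL;
        /* Assumed signature: advances p past the written data. */
        ff_vorbiscomment_write(&p, &m, vendor, count);
        *out_len = len;
        return buf;
    }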
| @@ -137,7 +137,7 @@ char *av_d2str(double d); | |||||
| char *av_get_token(const char **buf, const char *term); | char *av_get_token(const char **buf, const char *term); | ||||
| /** | /** | ||||
| * Locale independent conversion of ASCII characters to upper case. | |||||
| * Locale-independent conversion of ASCII characters to uppercase. | |||||
| */ | */ | ||||
| static inline int av_toupper(int c) | static inline int av_toupper(int c) | ||||
| { | { | ||||
| @@ -147,7 +147,7 @@ static inline int av_toupper(int c) | |||||
| } | } | ||||
| /** | /** | ||||
| * Locale independent conversion of ASCII characters to lower case. | |||||
| * Locale-independent conversion of ASCII characters to lowercase. | |||||
| */ | */ | ||||
| static inline int av_tolower(int c) | static inline int av_tolower(int c) | ||||
| { | { | ||||
| @@ -157,13 +157,13 @@ static inline int av_tolower(int c) | |||||
| } | } | ||||
| /* | /* | ||||
| * Locale independent case-insensitive compare. | |||||
| * Locale-independent case-insensitive compare. | |||||
| * @note This means only ASCII-range characters are case-insensitive | * @note This means only ASCII-range characters are case-insensitive | ||||
| */ | */ | ||||
| int av_strcasecmp(const char *a, const char *b); | int av_strcasecmp(const char *a, const char *b); | ||||
| /** | /** | ||||
| * Locale independent case-insensitive compare. | |||||
| * Locale-independent case-insensitive compare. | |||||
| * @note This means only ASCII-range characters are case-insensitive | * @note This means only ASCII-range characters are case-insensitive | ||||
| */ | */ | ||||
| int av_strncasecmp(const char *a, const char *b, size_t n); | int av_strncasecmp(const char *a, const char *b, size_t n); | ||||
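A small usage sketch of the locale-independent string helpers: only the ASCII range is case-folded, so the results do not change with the process locale.

    #include "libavutil/avstring.h"

    static int ascii_case_example(void)
    {
        int up   = av_toupper('a');    /* 'A' */
        int same = av_tolower(0xE9);   /* non-ASCII byte returned unchanged */

        /* Both comparisons match regardless of the current locale. */
        return !av_strcasecmp("RTSP", "rtsp") &&
               !av_strncasecmp("Content-Type", "content-", 8) &&
               up == 'A' && same == 0xE9;
    }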
| @@ -221,7 +221,7 @@ struct AVDictionary { | |||||
| #endif | #endif | ||||
| /** | /** | ||||
| * Returns NULL if a threading library has not been enabled. | |||||
| * Return NULL if a threading library has not been enabled. | |||||
| * Used to disable threading functions in AVCodec definitions | * Used to disable threading functions in AVCodec definitions | ||||
| * when not needed. | * when not needed. | ||||
| */ | */ | ||||
| @@ -75,7 +75,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, | |||||
| void *log_ctx); | void *log_ctx); | ||||
| /** | /** | ||||
| * Parses timestr and returns in *time a corresponding number of | |||||
| * Parse timestr and return in *timeval a corresponding number of | |||||
| * microseconds. | * microseconds. | ||||
| * | * | ||||
| * @param timeval puts here the number of microseconds corresponding | * @param timeval puts here the number of microseconds corresponding | ||||
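A short usage sketch of the duration form (the absolute-date form follows the same pattern); the example string and the expected value are illustrative.

    #include <stdint.h>
    #include "libavutil/parseutils.h"

    /* Parse "1:05.5" as a duration: on success *t holds microseconds
     * (65500000 here); a negative return value signals a parse error. */
    static int parse_duration_example(int64_t *t)
    {
        return av_parse_time(t, "1:05.5", 1 /* duration, not a date */);
    }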
| @@ -77,9 +77,10 @@ void pp_postprocess(const uint8_t * src[3], const int srcStride[3], | |||||
| /** | /** | ||||
| * returns a pp_mode or NULL if an error occurred | |||||
| * name is the string after "-pp" on the command line | |||||
| * quality is a number from 0 to PP_QUALITY_MAX | |||||
| * Return a pp_mode or NULL if an error occurred. | |||||
| * | |||||
| * @param name the string after "-pp" on the command line | |||||
| * @param quality a number from 0 to PP_QUALITY_MAX | |||||
| */ | */ | ||||
| pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality); | pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality); | ||||
| void pp_free_mode(pp_mode *mode); | void pp_free_mode(pp_mode *mode); | ||||
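A hedged sketch of the mode lifecycle: build a mode from a "-pp"-style string, check the NULL error return, pair it with a postprocessing context, and release both. The filter string, PP_FORMAT_420 and the context calls show typical usage rather than requirements of this API.

    #include "postprocess.h"

    static int pp_mode_example(int width, int height)
    {
        pp_mode    *mode = pp_get_mode_by_name_and_quality("default", PP_QUALITY_MAX);
        pp_context *ctx  = pp_get_context(width, height, PP_FORMAT_420);

        if (!mode || !ctx) {
            if (mode) pp_free_mode(mode);
            if (ctx)  pp_free_context(ctx);
            return -1;
        }

        /* ... pass mode and ctx to pp_postprocess() per its declaration ... */

        pp_free_mode(mode);
        pp_free_context(ctx);
        return 0;
    }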
| @@ -1912,7 +1912,7 @@ MEDIAN((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8)) | |||||
| #if HAVE_MMX | #if HAVE_MMX | ||||
| /** | /** | ||||
| * transposes and shift the given 8x8 Block into dst1 and dst2 | |||||
| * Transpose and shift the given 8x8 block into dst1 and dst2. | |||||
| */ | */ | ||||
| static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride) | static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride) | ||||
| { | { | ||||
| @@ -1997,7 +1997,7 @@ static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src | |||||
| } | } | ||||
| /** | /** | ||||
| * transposes the given 8x8 block | |||||
| * Transpose the given 8x8 block. | |||||
| */ | */ | ||||
| static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src) | static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src) | ||||
| { | { | ||||
| @@ -57,17 +57,17 @@ | |||||
| #endif | #endif | ||||
| /** | /** | ||||
| * Returns the LIBSWSCALE_VERSION_INT constant. | |||||
| * Return the LIBSWSCALE_VERSION_INT constant. | |||||
| */ | */ | ||||
| unsigned swscale_version(void); | unsigned swscale_version(void); | ||||
| /** | /** | ||||
| * Returns the libswscale build-time configuration. | |||||
| * Return the libswscale build-time configuration. | |||||
| */ | */ | ||||
| const char *swscale_configuration(void); | const char *swscale_configuration(void); | ||||
| /** | /** | ||||
| * Returns the libswscale license. | |||||
| * Return the libswscale license. | |||||
| */ | */ | ||||
| const char *swscale_license(void); | const char *swscale_license(void); | ||||
| @@ -124,7 +124,7 @@ const char *swscale_license(void); | |||||
| #define SWS_CS_DEFAULT 5 | #define SWS_CS_DEFAULT 5 | ||||
| /** | /** | ||||
| * Returns a pointer to yuv<->rgb coefficients for the given colorspace | |||||
| * Return a pointer to yuv<->rgb coefficients for the given colorspace | |||||
| * suitable for sws_setColorspaceDetails(). | * suitable for sws_setColorspaceDetails(). | ||||
| * | * | ||||
| * @param colorspace One of the SWS_CS_* macros. If invalid, | * @param colorspace One of the SWS_CS_* macros. If invalid, | ||||
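As an illustration of how the returned table is consumed, a hedged sketch of feeding it to sws_setColorspaceDetails(); the neutral brightness/contrast/saturation values (0 and 1<<16) are an assumption for the example, not mandated by the API.

    #include "swscale.h"

    /* Use BT.709 coefficients on the input side and the default table on
     * the output side of an already-allocated context. */
    static int set_bt709_input(struct SwsContext *ctx)
    {
        return sws_setColorspaceDetails(ctx,
                                        sws_getCoefficients(SWS_CS_ITU709),  0 /* srcRange */,
                                        sws_getCoefficients(SWS_CS_DEFAULT), 0 /* dstRange */,
                                        0, 1 << 16, 1 << 16);
    }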
| @@ -151,26 +151,26 @@ typedef struct { | |||||
| struct SwsContext; | struct SwsContext; | ||||
| /** | /** | ||||
| * Returns a positive value if pix_fmt is a supported input format, 0 | |||||
| * Return a positive value if pix_fmt is a supported input format, 0 | |||||
| * otherwise. | * otherwise. | ||||
| */ | */ | ||||
| int sws_isSupportedInput(enum PixelFormat pix_fmt); | int sws_isSupportedInput(enum PixelFormat pix_fmt); | ||||
| /** | /** | ||||
| * Returns a positive value if pix_fmt is a supported output format, 0 | |||||
| * Return a positive value if pix_fmt is a supported output format, 0 | |||||
| * otherwise. | * otherwise. | ||||
| */ | */ | ||||
| int sws_isSupportedOutput(enum PixelFormat pix_fmt); | int sws_isSupportedOutput(enum PixelFormat pix_fmt); | ||||
| /** | /** | ||||
| * Allocates an empty SwsContext. This must be filled and passed to | |||||
| * Allocate an empty SwsContext. This must be filled and passed to | |||||
| * sws_init_context(). For filling see AVOptions, options.c and | * sws_init_context(). For filling see AVOptions, options.c and | ||||
| * sws_setColorspaceDetails(). | * sws_setColorspaceDetails(). | ||||
| */ | */ | ||||
| struct SwsContext *sws_alloc_context(void); | struct SwsContext *sws_alloc_context(void); | ||||
| /** | /** | ||||
| * Initializes the swscaler context sws_context. | |||||
| * Initialize the swscaler context sws_context. | |||||
| * | * | ||||
| * @return zero or positive value on success, a negative value on | * @return zero or positive value on success, a negative value on | ||||
| * error | * error | ||||
| @@ -178,14 +178,14 @@ struct SwsContext *sws_alloc_context(void); | |||||
| int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); | int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); | ||||
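A hedged sketch of the alloc/fill/init path. The AVOption names ("srcw", "srch", "src_format", ...) and the av_opt_set_int() setter are assumptions about the options defined in libswscale/options.c, not quoted from it; consult that file for the authoritative names.

    #include "libavutil/opt.h"
    #include "swscale.h"

    static struct SwsContext *alloc_and_init(int w, int h,
                                             enum PixelFormat src_fmt,
                                             enum PixelFormat dst_fmt)
    {
        struct SwsContext *c = sws_alloc_context();
        if (!c)
            return NULL;

        /* Option names assumed for illustration. */
        av_opt_set_int(c, "srcw",       w,            0);
        av_opt_set_int(c, "srch",       h,            0);
        av_opt_set_int(c, "src_format", src_fmt,      0);
        av_opt_set_int(c, "dstw",       w,            0);
        av_opt_set_int(c, "dsth",       h,            0);
        av_opt_set_int(c, "dst_format", dst_fmt,      0);
        av_opt_set_int(c, "sws_flags",  SWS_BILINEAR, 0);

        if (sws_init_context(c, NULL, NULL) < 0) {
            sws_freeContext(c);
            return NULL;
        }
        return c;
    }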
| /** | /** | ||||
| * Frees the swscaler context swsContext. | |||||
| * Free the swscaler context swsContext. | |||||
| * If swsContext is NULL, then does nothing. | * If swsContext is NULL, then does nothing. | ||||
| */ | */ | ||||
| void sws_freeContext(struct SwsContext *swsContext); | void sws_freeContext(struct SwsContext *swsContext); | ||||
| #if FF_API_SWS_GETCONTEXT | #if FF_API_SWS_GETCONTEXT | ||||
| /** | /** | ||||
| * Allocates and returns a SwsContext. You need it to perform | |||||
| * Allocate and return an SwsContext. You need it to perform | |||||
| * scaling/conversion operations using sws_scale(). | * scaling/conversion operations using sws_scale(). | ||||
| * | * | ||||
| * @param srcW the width of the source image | * @param srcW the width of the source image | ||||
| @@ -207,7 +207,7 @@ struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat | |||||
| #endif | #endif | ||||
| /** | /** | ||||
| * Scales the image slice in srcSlice and puts the resulting scaled | |||||
| * Scale the image slice in srcSlice and put the resulting scaled | |||||
| * slice in the image in dst. A slice is a sequence of consecutive | * slice in the image in dst. A slice is a sequence of consecutive | ||||
| * rows in an image. | * rows in an image. | ||||
| * | * | ||||
| @@ -252,35 +252,35 @@ int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, | |||||
| int *brightness, int *contrast, int *saturation); | int *brightness, int *contrast, int *saturation); | ||||
| /** | /** | ||||
| * Allocates and returns an uninitialized vector with length coefficients. | |||||
| * Allocate and return an uninitialized vector with length coefficients. | |||||
| */ | */ | ||||
| SwsVector *sws_allocVec(int length); | SwsVector *sws_allocVec(int length); | ||||
| /** | /** | ||||
| * Returns a normalized Gaussian curve used to filter stuff | |||||
| * quality=3 is high quality, lower is lower quality. | |||||
| * Return a normalized Gaussian curve used for filtering. | |||||
| * quality = 3 is high quality, lower is lower quality. | |||||
| */ | */ | ||||
| SwsVector *sws_getGaussianVec(double variance, double quality); | SwsVector *sws_getGaussianVec(double variance, double quality); | ||||
| /** | /** | ||||
| * Allocates and returns a vector with length coefficients, all | |||||
| * Allocate and return a vector with length coefficients, all | |||||
| * with the same value c. | * with the same value c. | ||||
| */ | */ | ||||
| SwsVector *sws_getConstVec(double c, int length); | SwsVector *sws_getConstVec(double c, int length); | ||||
| /** | /** | ||||
| * Allocates and returns a vector with just one coefficient, with | |||||
| * Allocate and return a vector with just one coefficient, with | |||||
| * value 1.0. | * value 1.0. | ||||
| */ | */ | ||||
| SwsVector *sws_getIdentityVec(void); | SwsVector *sws_getIdentityVec(void); | ||||
| /** | /** | ||||
| * Scales all the coefficients of a by the scalar value. | |||||
| * Scale all the coefficients of a by the scalar value. | |||||
| */ | */ | ||||
| void sws_scaleVec(SwsVector *a, double scalar); | void sws_scaleVec(SwsVector *a, double scalar); | ||||
| /** | /** | ||||
| * Scales all the coefficients of a so that their sum equals height. | |||||
| * Scale all the coefficients of a so that their sum equals height. | |||||
| */ | */ | ||||
| void sws_normalizeVec(SwsVector *a, double height); | void sws_normalizeVec(SwsVector *a, double height); | ||||
| void sws_convVec(SwsVector *a, SwsVector *b); | void sws_convVec(SwsVector *a, SwsVector *b); | ||||
| @@ -289,13 +289,13 @@ void sws_subVec(SwsVector *a, SwsVector *b); | |||||
| void sws_shiftVec(SwsVector *a, int shift); | void sws_shiftVec(SwsVector *a, int shift); | ||||
| /** | /** | ||||
| * Allocates and returns a clone of the vector a, that is a vector | |||||
| * Allocate and return a clone of the vector a, that is a vector | |||||
| * with the same coefficients as a. | * with the same coefficients as a. | ||||
| */ | */ | ||||
| SwsVector *sws_cloneVec(SwsVector *a); | SwsVector *sws_cloneVec(SwsVector *a); | ||||
| /** | /** | ||||
| * Prints with av_log() a textual representation of the vector a | |||||
| * Print with av_log() a textual representation of the vector a | |||||
| * if log_level <= av_log_level. | * if log_level <= av_log_level. | ||||
| */ | */ | ||||
| void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); | void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); | ||||
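The vector helpers compose naturally; a brief sketch of building a normalized blur kernel (the variance and quality values are arbitrary example inputs, and the caller releases the result with sws_freeVec()).

    #include "swscale.h"

    static SwsVector *make_blur_kernel(double variance)
    {
        SwsVector *v = sws_getGaussianVec(variance, 3.0 /* quality */);
        if (!v)
            return NULL;
        /* Make the taps sum to 1.0 so the filter preserves brightness. */
        sws_normalizeVec(v, 1.0);
        return v;
    }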
| @@ -309,8 +309,7 @@ SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, | |||||
| void sws_freeFilter(SwsFilter *filter); | void sws_freeFilter(SwsFilter *filter); | ||||
| /** | /** | ||||
| * Checks if context can be reused, otherwise reallocates a new | |||||
| * one. | |||||
| * Check if context can be reused, otherwise reallocate a new one. | |||||
| * | * | ||||
| * If context is NULL, just calls sws_getContext() to get a new | * If context is NULL, just calls sws_getContext() to get a new | ||||
| * context. Otherwise, checks if the parameters are the ones already | * context. Otherwise, checks if the parameters are the ones already | ||||
| @@ -328,7 +327,7 @@ struct SwsContext *sws_getCachedContext(struct SwsContext *context, | |||||
| SwsFilter *dstFilter, const double *param); | SwsFilter *dstFilter, const double *param); | ||||
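A sketch of the intended reuse pattern: keep one context pointer per stream and let sws_getCachedContext() decide whether to keep or replace it when the source geometry changes. The state struct and the fixed output parameters are illustrative.

    #include "swscale.h"

    struct ScaleState {
        struct SwsContext *sws;
        int dst_w, dst_h;
    };

    /* Returns 0 on success; on parameter change the old context is freed
     * internally and a fresh one is allocated. */
    static int update_scaler(struct ScaleState *st, int src_w, int src_h,
                             enum PixelFormat src_fmt)
    {
        st->sws = sws_getCachedContext(st->sws, src_w, src_h, src_fmt,
                                       st->dst_w, st->dst_h, PIX_FMT_YUV420P,
                                       SWS_BICUBIC, NULL, NULL, NULL);
        return st->sws ? 0 : -1;
    }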
| /** | /** | ||||
| * Converts an 8bit paletted frame into a frame with a color depth of 32-bits. | |||||
| * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. | |||||
| * | * | ||||
| * The output frame will have the same packed format as the palette. | * The output frame will have the same packed format as the palette. | ||||
| * | * | ||||
| @@ -340,7 +339,7 @@ struct SwsContext *sws_getCachedContext(struct SwsContext *context, | |||||
| void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); | void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); | ||||
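A tiny usage sketch: expand one row of PAL8 pixels through a 256-entry, 4-bytes-per-entry palette; sizing the buffers is the caller's responsibility.

    #include <stdint.h>
    #include "swscale.h"

    /* dst must hold 4 * width bytes, palette 4 * 256 bytes. */
    static void expand_pal8_row(const uint8_t *src, uint8_t *dst, int width,
                                const uint8_t *palette)
    {
        sws_convertPalette8ToPacked32(src, dst, width, palette);
    }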
| /** | /** | ||||
| * Converts an 8bit paletted frame into a frame with a color depth of 24 bits. | |||||
| * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. | |||||
| * | * | ||||
| * With the palette format "ABCD", the destination frame ends up with the format "ABC". | * With the palette format "ABCD", the destination frame ends up with the format "ABC". | ||||
| * | * | ||||
| @@ -625,7 +625,7 @@ extern const uint64_t ff_dither8[2]; | |||||
| extern const AVClass sws_context_class; | extern const AVClass sws_context_class; | ||||
| /** | /** | ||||
| * Sets c->swScale to an unscaled converter if one exists for the specific | |||||
| * Set c->swScale to an unscaled converter if one exists for the specific | |||||
| * source and destination formats, bit depths, flags, etc. | * source and destination formats, bit depths, flags, etc. | ||||
| */ | */ | ||||
| void ff_get_unscaled_swscale(SwsContext *c); | void ff_get_unscaled_swscale(SwsContext *c); | ||||
| @@ -633,7 +633,7 @@ void ff_get_unscaled_swscale(SwsContext *c); | |||||
| void ff_swscale_get_unscaled_altivec(SwsContext *c); | void ff_swscale_get_unscaled_altivec(SwsContext *c); | ||||
| /** | /** | ||||
| * Returns function pointer to fastest main scaler path function depending | |||||
| * Return a function pointer to the fastest main scaler path function, depending | |||||
| * on architecture and available optimizations. | * on architecture and available optimizations. | ||||
| */ | */ | ||||
| SwsFunc ff_getSwsFunc(SwsContext *c); | SwsFunc ff_getSwsFunc(SwsContext *c); | ||||
| @@ -1,6 +1,6 @@ | |||||
| /* | /* | ||||
| * Generates a synthetic stereo sound | |||||
| * NOTE: No floats are used to guarantee a bit exact output. | |||||
| * Generate a synthetic stereo sound. | |||||
| * NOTE: No floats are used to guarantee bitexact output. | |||||
| * | * | ||||
| * Copyright (c) 2002 Fabrice Bellard | * Copyright (c) 2002 Fabrice Bellard | ||||
| * | * | ||||
| @@ -1,5 +1,5 @@ | |||||
| /* | /* | ||||
| * Generates a synthetic YUV video sequence suitable for codec testing. | |||||
| * Generate a synthetic YUV video sequence suitable for codec testing. | |||||
| * | * | ||||
| * copyright (c) Sebastien Bechet <s.bechet@av7.net> | * copyright (c) Sebastien Bechet <s.bechet@av7.net> | ||||
| * | * | ||||
| @@ -1,6 +1,6 @@ | |||||
| /* | /* | ||||
| * Generates a synthetic YUV video sequence suitable for codec testing. | |||||
| * NOTE: No floats are used to guarantee a bit exact output. | |||||
| * Generate a synthetic YUV video sequence suitable for codec testing. | |||||
| * NOTE: No floats are used to guarantee bitexact output. | |||||
| * | * | ||||
| * Copyright (c) 2002 Fabrice Bellard | * Copyright (c) 2002 Fabrice Bellard | ||||
| * | * | ||||