* qatar/master:
  vorbisdec: Rename silly "class_" variable to plain "class".
  simple_idct_alpha: Drop some useless casts.
  Simplify av_log_missing_feature().
  ac3enc: remove check for mismatching channels and channel_layout
  If AVCodecContext.channels is 0 and AVCodecContext.channel_layout is non-zero, set channels based on channel_layout.
  If AVCodecContext.channel_layout and AVCodecContext.channels are both non-zero, check to make sure they do not contradict each other.
  cosmetics: indentation
  Check AVCodec.supported_samplerates and AVCodec.channel_layouts in avcodec_open().
  aacdec: remove sf_scale and sf_offset.
  aacdec: use a scale of 2 in the LTP MDCT rather than doubling the coefficient table values from the spec.
  Define POW_SF2_ZERO in aac.h and use for ff_aac_pow2sf_tab[] offsets instead of hardcoding 200 everywhere.
  Large intensity stereo and PNS indices are legal. Clip them instead of erroring out. A magnitude of 100 corresponds to 2^25, so they will most likely result in clipped output anyway.
  qpeg: use reget_buffer() in decode_frame()
  ultimotion: use reget_buffer() in ulti_decode_frame()
  smacker: remove unnecessary call to avctx->release_buffer in decode_frame()
  avparser: don't av_malloc(0).

Merged-by: Michael Niedermayer <michaelni@gmx.at>
@@ -130,6 +130,7 @@ typedef struct {
 #define SCALE_MAX_POS   255    ///< scalefactor index maximum value
 #define SCALE_MAX_DIFF   60    ///< maximum scalefactor difference allowed by standard
 #define SCALE_DIFF_ZERO  60    ///< codebook index corresponding to zero scalefactor indices difference
+#define POW_SF2_ZERO    200    ///< ff_aac_pow2sf_tab index corresponding to pow(2, 0);

 /**
  * Long Term Prediction
@@ -292,8 +293,6 @@ typedef struct {
      * @{
      */
     float *output_data[MAX_CHANNELS];   ///< Points to each element's 'ret' buffer (PCM output).
-    float sf_scale;                     ///< Pre-scale for correct IMDCT and dsp.float_to_int16.
-    int sf_offset;                      ///< offset into pow2sf_tab as appropriate for dsp.float_to_int16
     /** @} */

     DECLARE_ALIGNED(32, float, temp)[128];
@@ -29,13 +29,14 @@
 #include "libavcodec/aac_tables.h"
 #else
 #include "libavutil/mathematics.h"
+#include "libavcodec/aac.h"
 float ff_aac_pow2sf_tab[428];

 void ff_aac_tableinit(void)
 {
     int i;
     for (i = 0; i < 428; i++)
-        ff_aac_pow2sf_tab[i] = pow(2, (i - 200) / 4.);
+        ff_aac_pow2sf_tab[i] = pow(2, (i - POW_SF2_ZERO) / 4.);
 }
 #endif /* CONFIG_HARDCODED_TABLES */
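A quick illustration of the indexing convention introduced here (a standalone sketch, not part of the merge): after the table is built, entry POW_SF2_ZERO is exactly 1.0 and each step of 4 entries is a factor of 2, which is why the hardcoded 200 offsets in the hunks below can all become the named constant.

/* Standalone sketch of the ff_aac_pow2sf_tab indexing convention. */
#include <math.h>
#include <stdio.h>

#define POW_SF2_ZERO 200

static float pow2sf_tab[428];

int main(void)
{
    int i;
    for (i = 0; i < 428; i++)
        pow2sf_tab[i] = pow(2, (i - POW_SF2_ZERO) / 4.);
    printf("%f %f %f\n", pow2sf_tab[POW_SF2_ZERO],       /* 1.000000 */
                         pow2sf_tab[POW_SF2_ZERO + 4],   /* 2.000000 */
                         pow2sf_tab[POW_SF2_ZERO - 4]);  /* 0.500000 */
    return 0;
}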
@@ -109,8 +109,8 @@ static av_always_inline float quantize_and_encode_band_cost_template(
                                 int *bits, int BT_ZERO, int BT_UNSIGNED,
                                 int BT_PAIR, int BT_ESC)
 {
-    const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
-    const float  Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+    const float IQ = ff_aac_pow2sf_tab[POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
+    const float  Q = ff_aac_pow2sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
     const float CLIPPED_ESCAPE = 165140.0f*IQ;
     int i, j;
     float cost = 0;
@@ -281,7 +281,7 @@ static float find_max_val(int group_len, int swb_size, const float *scaled) {
 }

 static int find_min_book(float maxval, int sf) {
-    float Q = ff_aac_pow2sf_tab[200 - sf + SCALE_ONE_POS - SCALE_DIV_512];
+    float Q = ff_aac_pow2sf_tab[POW_SF2_ZERO - sf + SCALE_ONE_POS - SCALE_DIV_512];
     float Q34 = sqrtf(Q * sqrtf(Q));
     int qmaxval, cb;
     qmaxval = maxval * Q34 + 0.4054f;
@@ -956,7 +956,7 @@ static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
                     dist -= b;
                 }
                 dist *= 1.0f / 512.0f / lambda;
-                quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[200 - scf + SCALE_ONE_POS - SCALE_DIV_512]);
+                quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[POW_SF2_ZERO - scf + SCALE_ONE_POS - SCALE_DIV_512]);
                 if (quant_max >= 8191) { // too much, return to the previous quantizer
                     sce->sf_idx[w*16+g] = prev_scf;
                     break;
@@ -579,12 +579,6 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
     ac->random_state = 0x1f2e3d4c;

-    // -1024 - Compensate wrong IMDCT method.
-    // 60    - Required to scale values to the correct range [-32768,32767]
-    //         for float to int16 conversion. (1 << (60 / 4)) == 32768
-    ac->sf_scale  = 1. / -1024.;
-    ac->sf_offset = 60;
-
     ff_aac_tableinit();

     INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
@@ -592,9 +586,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
                     ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
                     352);

-    ff_mdct_init(&ac->mdct,       11, 1, 1.0);
-    ff_mdct_init(&ac->mdct_small,  8, 1, 1.0);
-    ff_mdct_init(&ac->mdct_ltp,   11, 0, 1.0);
+    ff_mdct_init(&ac->mdct,       11, 1, 1.0/1024.0);
+    ff_mdct_init(&ac->mdct_small,  8, 1, 1.0/128.0);
+    ff_mdct_init(&ac->mdct_ltp,   11, 0, -2.0);
     // window initialization
     ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
     ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
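The ff_mdct_init() scale changes above fold the scaling that used to go through ac->sf_scale/ac->sf_offset into the transforms themselves. A toy standalone sketch of why that is legal (hypothetical 4-point transform, not FFmpeg code): a linear transform commutes with a constant factor, so scaling inside the transform equals scaling its output afterwards.

/* Toy demonstration: T(x) * s == T_scaled_by_s(x) for a linear transform T. */
#include <stdio.h>

static void transform(const float *in, float *out, float scale)
{
    /* stand-in linear "transform": scaled running sums */
    int i, j;
    for (i = 0; i < 4; i++) {
        out[i] = 0;
        for (j = 0; j <= i; j++)
            out[i] += in[j];
        out[i] *= scale;
    }
}

int main(void)
{
    const float coef[4] = { 1, 2, 3, 4 };
    float a[4], b[4];
    int i;

    transform(coef, a, 1.0f);               /* old style: unit-scaled transform ...   */
    for (i = 0; i < 4; i++)
        a[i] *= 1.0f / 1024.0f;             /* ... followed by a separate sf_scale    */

    transform(coef, b, 1.0f / 1024.0f);     /* new style: scale folded into transform */

    for (i = 0; i < 4; i++)
        printf("%g %g\n", a[i], b[i]);      /* identical values                       */
    return 0;
}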
@@ -652,7 +646,7 @@ static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
     int sfb;

     ltp->lag  = get_bits(gb, 11);
-    ltp->coef = ltp_coef[get_bits(gb, 3)] * ac->sf_scale;
+    ltp->coef = ltp_coef[get_bits(gb, 3)];
     for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
         ltp->used[sfb] = get_bits1(gb);
 }
@@ -790,9 +784,9 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                                enum BandType band_type[120],
                                int band_type_run_end[120])
 {
-    const int sf_offset = ac->sf_offset + (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE ? 12 : 0);
     int g, i, idx = 0;
-    int offset[3] = { global_gain, global_gain - 90, 100 };
+    int offset[3] = { global_gain, global_gain - 90, 0 };
+    int clipped_offset;
     int noise_flag = 1;
     static const char *sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" };
     for (g = 0; g < ics->num_window_groups; g++) {
@@ -804,12 +798,14 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
             } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
                 for (; i < run_end; i++, idx++) {
                     offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
-                    if (offset[2] > 255U) {
-                        av_log(ac->avctx, AV_LOG_ERROR,
-                               "%s (%d) out of range.\n", sf_str[2], offset[2]);
-                        return -1;
+                    clipped_offset = av_clip(offset[2], -155, 100);
+                    if (offset[2] != clipped_offset) {
+                        av_log_ask_for_sample(ac->avctx, "Intensity stereo "
+                                "position clipped (%d -> %d).\nIf you heard an "
+                                "audible artifact, there may be a bug in the "
+                                "decoder. ", offset[2], clipped_offset);
                     }
-                    sf[idx] = ff_aac_pow2sf_tab[-offset[2] + 300];
+                    sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + POW_SF2_ZERO];
                 }
             } else if (band_type[idx] == NOISE_BT) {
                 for (; i < run_end; i++, idx++) {
@@ -817,12 +813,14 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                         offset[1] += get_bits(gb, 9) - 256;
                     else
                         offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
-                    if (offset[1] > 255U) {
-                        av_log(ac->avctx, AV_LOG_ERROR,
-                               "%s (%d) out of range.\n", sf_str[1], offset[1]);
-                        return -1;
+                    clipped_offset = av_clip(offset[1], -100, 155);
+                    if (offset[1] != clipped_offset) {
+                        av_log_ask_for_sample(ac->avctx, "Noise gain clipped "
+                                "(%d -> %d).\nIf you heard an audible "
+                                "artifact, there may be a bug in the decoder. ",
+                                offset[1], clipped_offset);
                     }
-                    sf[idx] = -ff_aac_pow2sf_tab[offset[1] + sf_offset + 100];
+                    sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + POW_SF2_ZERO];
                 }
             } else {
                 for (; i < run_end; i++, idx++) {
@@ -832,7 +830,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                                "%s (%d) out of range.\n", sf_str[0], offset[0]);
                         return -1;
                     }
-                    sf[idx] = -ff_aac_pow2sf_tab[ offset[0] + sf_offset];
+                    sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
                 }
             }
         }
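For reference, the av_clip() ranges above keep every table access inside ff_aac_pow2sf_tab[0..427] (with POW_SF2_ZERO == 200), and a clipped magnitude of 100 is pow(2, 100/4) == 2^25, matching the commit message's point that such values would clip the output anyway. A small standalone check of those bounds:

/* Standalone sketch verifying the clipping ranges used above. */
#include <assert.h>
#include <stdio.h>

#define POW_SF2_ZERO 200

int main(void)
{
    int is, noise;
    for (is = -155; is <= 100; is++)            /* intensity stereo position range */
        assert(-is + POW_SF2_ZERO >= 0 && -is + POW_SF2_ZERO < 428);
    for (noise = -100; noise <= 155; noise++)   /* noise gain range                */
        assert(noise + POW_SF2_ZERO >= 0 && noise + POW_SF2_ZERO < 428);
    printf("pow(2, 100/4.) == %d\n", 1 << 25);  /* 33554432 */
    return 0;
}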
@@ -1243,7 +1241,6 @@ static av_always_inline float flt16_trunc(float pf)
 }

 static av_always_inline void predict(PredictorState *ps, float *coef,
-                                     float sf_scale, float inv_sf_scale,
                                      int output_enable)
 {
     const float a     = 0.953125; // 61.0 / 64
@@ -1260,9 +1257,9 @@ static av_always_inline void predict(PredictorState *ps, float *coef,
     pv = flt16_round(k1 * r0 + k2 * r1);
     if (output_enable)
-        *coef += pv * sf_scale;
+        *coef += pv;

-    e0 = *coef * inv_sf_scale;
+    e0 = *coef;
     e1 = e0 - k1 * r0;

     ps->cor1 = flt16_trunc(alpha * cor1 + r1 * e1);
@@ -1280,7 +1277,6 @@ static av_always_inline void predict(PredictorState *ps, float *coef,
 static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
 {
     int sfb, k;
-    float sf_scale = ac->sf_scale, inv_sf_scale = 1 / ac->sf_scale;

     if (!sce->ics.predictor_initialized) {
         reset_all_predictors(sce->predictor_state);
@@ -1291,7 +1287,6 @@ static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
         for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->m4ac.sampling_index]; sfb++) {
             for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
                 predict(&sce->predictor_state[k], &sce->coeffs[k],
-                        sf_scale, inv_sf_scale,
                         sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
             }
         }
@@ -36,11 +36,11 @@
 #include <stdint.h>

 /* @name ltp_coef
- * Table of the LTP coefficient (multiplied by 2)
+ * Table of the LTP coefficients
  */
 static const float ltp_coef[8] = {
-    1.141658, 1.393232, 1.626008, 1.822608,
-    1.969800, 2.135788, 2.389202, 2.739066,
+    0.570829, 0.696616, 0.813004, 0.911304,
+    0.984900, 1.067894, 1.194601, 1.369533,
 };
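The new ltp_coef[] entries are the spec values; the removed ones were the same values pre-multiplied by 2, a factor now supplied by the LTP MDCT scale (the ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0) line in the aacdec.c hunk above). A trivial standalone check of that relationship:

/* Standalone sketch: doubling the new table reproduces the removed values. */
#include <stdio.h>

int main(void)
{
    static const float new_coef[8] = { 0.570829, 0.696616, 0.813004, 0.911304,
                                       0.984900, 1.067894, 1.194601, 1.369533 };
    int i;
    for (i = 0; i < 8; i++)
        printf("%f\n", 2.0f * new_coef[i]);  /* matches the old, doubled table */
    return 0;
}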
/* @name tns_tmp2_map | /* @name tns_tmp2_map | ||||
@@ -1962,8 +1962,6 @@ static av_cold int set_channel_info(AC3EncodeContext *s, int channels, | |||||
ch_layout = *channel_layout; | ch_layout = *channel_layout; | ||||
if (!ch_layout) | if (!ch_layout) | ||||
ch_layout = avcodec_guess_channel_layout(channels, CODEC_ID_AC3, NULL); | ch_layout = avcodec_guess_channel_layout(channels, CODEC_ID_AC3, NULL); | ||||
if (av_get_channel_layout_nb_channels(ch_layout) != channels) | |||||
return AVERROR(EINVAL); | |||||
s->lfe_on = !!(ch_layout & AV_CH_LOW_FREQUENCY); | s->lfe_on = !!(ch_layout & AV_CH_LOW_FREQUENCY); | ||||
s->channels = channels; | s->channels = channels; | ||||
@@ -33,13 +33,13 @@ | |||||
// cos(i * M_PI / 16) * sqrt(2) * (1 << 14) | // cos(i * M_PI / 16) * sqrt(2) * (1 << 14) | ||||
// W4 is actually exactly 16384, but using 16383 works around | // W4 is actually exactly 16384, but using 16383 works around | ||||
// accumulating rounding errors for some encoders | // accumulating rounding errors for some encoders | ||||
#define W1 ((int_fast32_t) 22725) | |||||
#define W2 ((int_fast32_t) 21407) | |||||
#define W3 ((int_fast32_t) 19266) | |||||
#define W4 ((int_fast32_t) 16383) | |||||
#define W5 ((int_fast32_t) 12873) | |||||
#define W6 ((int_fast32_t) 8867) | |||||
#define W7 ((int_fast32_t) 4520) | |||||
#define W1 22725 | |||||
#define W2 21407 | |||||
#define W3 19266 | |||||
#define W4 16383 | |||||
#define W5 12873 | |||||
#define W6 8867 | |||||
#define W7 4520 | |||||
#define ROW_SHIFT 11 | #define ROW_SHIFT 11 | ||||
#define COL_SHIFT 20 | #define COL_SHIFT 20 | ||||
@@ -259,12 +259,9 @@ static int decode_frame(AVCodecContext *avctx, | |||||
int delta; | int delta; | ||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); | const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); | ||||
if(p->data[0]) | |||||
avctx->release_buffer(avctx, p); | |||||
p->reference= 0; | |||||
if(avctx->get_buffer(avctx, p) < 0){ | |||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); | |||||
p->reference = 3; | |||||
if (avctx->reget_buffer(avctx, p) < 0) { | |||||
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); | |||||
return -1; | return -1; | ||||
} | } | ||||
outdata = a->pic.data[0]; | outdata = a->pic.data[0]; | ||||
@@ -360,8 +360,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac | |||||
if(buf_size <= 769) | if(buf_size <= 769) | ||||
return 0; | return 0; | ||||
if(smk->pic.data[0]) | |||||
avctx->release_buffer(avctx, &smk->pic); | |||||
smk->pic.reference = 1; | smk->pic.reference = 1; | ||||
smk->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; | smk->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; | ||||
@@ -224,13 +224,10 @@ static int ulti_decode_frame(AVCodecContext *avctx, | |||||
int skip; | int skip; | ||||
int tmp; | int tmp; | ||||
if(s->frame.data[0]) | |||||
avctx->release_buffer(avctx, &s->frame); | |||||
s->frame.reference = 1; | s->frame.reference = 1; | ||||
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; | s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; | ||||
if(avctx->get_buffer(avctx, &s->frame) < 0) { | |||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); | |||||
if (avctx->reget_buffer(avctx, &s->frame) < 0) { | |||||
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); | |||||
return -1; | return -1; | ||||
} | } | ||||
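The qpeg, smacker and ultimotion hunks above converge on the same pattern: these codecs reuse the previous frame's pixels, and reget_buffer() hands back a buffer whose prior contents are preserved, so the explicit release_buffer()/get_buffer() dance is unnecessary. A minimal sketch of that pattern against the old (pre-refcounted) buffer API, with a hypothetical decoder:

#include "avcodec.h"

/* Hypothetical delta-decoder skeleton mirroring the reget_buffer() usage above. */
static int sketch_decode_frame(AVCodecContext *avctx, AVFrame *frame)
{
    frame->reference    = 1;
    frame->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                          FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }
    /* frame->data[0] still holds the previous picture; apply the deltas here. */
    return 0;
}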
@@ -555,15 +555,50 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
         ret = AVERROR(EINVAL);
         goto free_and_end;
     }
-    if (avctx->codec->sample_fmts && avctx->codec->encode) {
+    if (avctx->codec->encode) {
         int i;
-        for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
-            if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
-                break;
-        if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
-            av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
-            ret = AVERROR(EINVAL);
-            goto free_and_end;
+        if (avctx->codec->sample_fmts) {
+            for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
+                if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
+                    break;
+            if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
+                av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
+                ret = AVERROR(EINVAL);
+                goto free_and_end;
+            }
+        }
+        if (avctx->codec->supported_samplerates) {
+            for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
+                if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
+                    break;
+            if (avctx->codec->supported_samplerates[i] == 0) {
+                av_log(avctx, AV_LOG_ERROR, "Specified sample_rate is not supported\n");
+                ret = AVERROR(EINVAL);
+                goto free_and_end;
+            }
+        }
+        if (avctx->codec->channel_layouts) {
+            if (!avctx->channel_layout) {
+                av_log(avctx, AV_LOG_WARNING, "channel_layout not specified\n");
+            } else {
+                for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
+                    if (avctx->channel_layout == avctx->codec->channel_layouts[i])
+                        break;
+                if (avctx->codec->channel_layouts[i] == 0) {
+                    av_log(avctx, AV_LOG_ERROR, "Specified channel_layout is not supported\n");
+                    ret = AVERROR(EINVAL);
+                    goto free_and_end;
+                }
+            }
+        }
+        if (avctx->channel_layout && avctx->channels) {
+            if (av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
+                av_log(avctx, AV_LOG_ERROR, "channel layout does not match number of channels\n");
+                ret = AVERROR(EINVAL);
+                goto free_and_end;
+            }
+        } else if (avctx->channel_layout) {
+            avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
         }
     }
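With these checks in place, an encoder caller can set only channel_layout and let avcodec_open() derive channels, while contradictory channels/channel_layout pairs are rejected centrally (which is what made the ac3enc check earlier in this merge redundant). A rough usage sketch against the API of this period (the helper name is hypothetical; error cleanup omitted):

#include "libavcodec/avcodec.h"
#include "libavutil/audioconvert.h"

/* Hypothetical helper: open a 5.1 AC-3 encoder specifying only channel_layout. */
int open_ac3_encoder(AVCodecContext **out)
{
    AVCodec *codec;
    AVCodecContext *avctx;

    avcodec_register_all();
    codec = avcodec_find_encoder(CODEC_ID_AC3);
    if (!codec)
        return -1;

    avctx                 = avcodec_alloc_context();
    avctx->bit_rate       = 448000;
    avctx->sample_rate    = 48000;
    avctx->sample_fmt     = codec->sample_fmts[0];
    avctx->channel_layout = AV_CH_LAYOUT_5POINT1;  /* avctx->channels left at 0 */

    if (avcodec_open(avctx, codec) < 0)  /* fills avctx->channels (6) or fails with EINVAL */
        return -1;
    *out = avctx;
    return 0;
}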
@@ -1194,11 +1229,9 @@ void av_log_missing_feature(void *avc, const char *feature, int want_sample)
     av_log(avc, AV_LOG_WARNING, "%s not implemented. Update your FFmpeg "
            "version to the newest one from Git. If the problem still "
            "occurs, it means that your file has a feature which has not "
-           "been implemented.", feature);
+           "been implemented.\n", feature);
     if(want_sample)
         av_log_ask_for_sample(avc, NULL);
-    else
-        av_log(avc, AV_LOG_WARNING, "\n");
 }

 void av_log_ask_for_sample(void *avc, const char *msg, ...)
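After this simplification the trailing newline always comes from the format string, and want_sample only decides whether av_log_ask_for_sample() is appended. An illustrative call site (the wrapper and feature names are placeholders, not taken from this diff):

/* Illustrative wrapper around a decoder's codec context. */
#include "libavcodec/avcodec.h"

static void warn_unsupported(AVCodecContext *avctx)
{
    av_log_missing_feature(avctx, "SSR", 1);  /* warn and request a sample  */
    av_log_missing_feature(avctx, "LTP", 0);  /* warn only                  */
}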
@@ -1138,7 +1138,7 @@ static int vorbis_floor1_decode(vorbis_context *vc,
     uint_fast16_t floor1_Y[258];
     uint_fast16_t floor1_Y_final[258];
     int floor1_flag[258];
-    uint_fast8_t class_;
+    uint_fast8_t class;
     uint_fast8_t cdim;
     uint_fast8_t cbits;
     uint_fast8_t csub;
@@ -1162,20 +1162,20 @@ static int vorbis_floor1_decode(vorbis_context *vc,
     offset = 2;
     for (i = 0; i < vf->partitions; ++i) {
-        class_ = vf->partition_class[i];
-        cdim   = vf->class_dimensions[class_];
-        cbits  = vf->class_subclasses[class_];
+        class  = vf->partition_class[i];
+        cdim   = vf->class_dimensions[class];
+        cbits  = vf->class_subclasses[class];
         csub = (1 << cbits) - 1;
         cval = 0;

         AV_DEBUG("Cbits %d \n", cbits);

         if (cbits) // this reads all subclasses for this partition's class
-            cval = get_vlc2(gb, vc->codebooks[vf->class_masterbook[class_]].vlc.table,
-                            vc->codebooks[vf->class_masterbook[class_]].nb_bits, 3);
+            cval = get_vlc2(gb, vc->codebooks[vf->class_masterbook[class]].vlc.table,
+                            vc->codebooks[vf->class_masterbook[class]].nb_bits, 3);

         for (j = 0; j < cdim; ++j) {
-            book = vf->subclass_books[class_][cval & csub];
+            book = vf->subclass_books[class][cval & csub];

             AV_DEBUG("book %d Cbits %d cval %d bits:%d \n", book, cbits, cval, get_bits_count(gb));