* qatar/master: (27 commits)
  cmdutils: use new avcodec_is_decoder/encoder() functions.
  lavc: make codec_is_decoder/encoder() public.
  lavc: deprecate AVCodecContext.sub_id.
  libcdio: add a forgotten AVClass to the private context.
  swscale: remove "cpu flags" from -sws_flags description.
  proresenc: give user a possibility to alter some encoding parameters
  vorbisenc: add output buffer overwrite protection
  libopencore-amrnbenc: fix end-of-stream handling
  ra144enc: fix end-of-stream handling
  nellymoserenc: zero any leftover packet bytes
  nellymoserenc: use proper MDCT overlap delay
  qpeg: Use bytestream2 functions to prevent buffer overreads.
  swscale: make %rep unconditional.
  vp8: convert simple loopfilter x86 assembly to use named arguments.
  vp8: convert idct x86 assembly to use named arguments.
  vp8: convert mc x86 assembly to use named arguments.
  vp8: convert loopfilter x86 assembly to use cpuflags().
  vp8: convert idct/mc x86 assembly to use cpuflags().
  swscale: remove now unnecessary hack.
  x86inc: don't "bake" stack_offset in named arguments.
  ...

Conflicts:
	cmdutils.c
	doc/APIchanges
	libavcodec/mpeg12.c
	libavcodec/options.c
	libavcodec/qpeg.c
	libavcodec/utils.c
	libavcodec/version.h
	libavdevice/libcdio.c
	tests/lavf-regression.sh

Merged-by: Michael Niedermayer <michaelni@gmx.at>
@@ -815,9 +815,9 @@ int opt_codecs(const char *opt, const char *arg) | |||
decode = encode = cap = 0; | |||
} | |||
if (p2 && strcmp(p->name, p2->name) == 0) { | |||
if (p->decode) | |||
if (av_codec_is_decoder(p)) | |||
decode = 1; | |||
if (p->encode || p->encode2) | |||
if (av_codec_is_encoder(p)) | |||
encode = 1; | |||
cap |= p->capabilities; | |||
} | |||
@@ -35,6 +35,9 @@ API changes, most recent first: | |||
2012-01-24 - xxxxxxx - lavfi 2.60.100 | |||
Add avfilter_graph_dump. | |||
2012-03-xx - xxxxxxx - lavc 54.7.0 - avcodec.h | |||
Add av_codec_is_encoder/decoder(). | |||
2012-xx-xx - xxxxxxx - lavc 54.3.0 - avcodec.h | |||
Add av_packet_shrink_side_data. | |||
@@ -42,7 +45,6 @@ API changes, most recent first: | |||
Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC, | |||
used for dealing with attached pictures/cover art. | |||
>>>>>>> qatar/master | |||
2012-02-25 - c9bca80 - lavu 51.24.0 - error.h | |||
Add AVERROR_UNKNOWN | |||
@@ -1280,15 +1280,12 @@ typedef struct AVCodecContext { | |||
*/ | |||
unsigned int stream_codec_tag; | |||
#if FF_API_SUB_ID | |||
/** | |||
* Some codecs need additional format info. It is stored here. | |||
* If any muxer uses this then ALL demuxers/parsers AND encoders for the | |||
* specific codec MUST set it correctly otherwise stream copy breaks. | |||
* In general use of this field by muxers is not recommended. | |||
* - encoding: Set by libavcodec. | |||
* - decoding: Set by libavcodec. (FIXME: Is this OK?) | |||
* @deprecated this field is unused | |||
*/ | |||
int sub_id; | |||
attribute_deprecated int sub_id; | |||
#endif | |||
void *priv_data; | |||
@@ -4504,4 +4501,14 @@ const AVClass *avcodec_get_frame_class(void); | |||
*/ | |||
int avcodec_is_open(AVCodecContext *s); | |||
/** | |||
* @return a non-zero number if codec is an encoder, zero otherwise | |||
*/ | |||
int av_codec_is_encoder(AVCodec *codec); | |||
/** | |||
* @return a non-zero number if codec is a decoder, zero otherwise | |||
*/ | |||
int av_codec_is_decoder(AVCodec *codec); | |||
#endif /* AVCODEC_AVCODEC_H */ |
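As a usage illustration only (not part of the patch): with these helpers public, an application can classify registered codecs without poking at AVCodec internals. A minimal sketch, assuming the usual avcodec_register_all() setup:

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Print every registered codec together with its encode/decode capability,
 * using the helpers made public above instead of testing function pointers. */
static void list_codecs(void)
{
    AVCodec *c = NULL;

    avcodec_register_all();
    while ((c = av_codec_next(c)))
        printf("%-24s encoder:%d decoder:%d\n", c->name,
               av_codec_is_encoder(c), av_codec_is_decoder(c));
}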
@@ -85,6 +85,7 @@ typedef struct AMRContext { | |||
int enc_bitrate; | |||
int enc_mode; | |||
int enc_dtx; | |||
int enc_last_frame; | |||
} AMRContext; | |||
static const AVOption options[] = { | |||
@@ -195,6 +196,7 @@ static av_cold int amr_nb_encode_init(AVCodecContext *avctx) | |||
} | |||
avctx->frame_size = 160; | |||
avctx->delay = 50; | |||
avctx->coded_frame = avcodec_alloc_frame(); | |||
if (!avctx->coded_frame) | |||
return AVERROR(ENOMEM); | |||
@@ -227,17 +229,40 @@ static int amr_nb_encode_frame(AVCodecContext *avctx, | |||
{ | |||
AMRContext *s = avctx->priv_data; | |||
int written; | |||
int16_t *flush_buf = NULL; | |||
const int16_t *samples = data; | |||
if (s->enc_bitrate != avctx->bit_rate) { | |||
s->enc_mode = get_bitrate_mode(avctx->bit_rate, avctx); | |||
s->enc_bitrate = avctx->bit_rate; | |||
} | |||
written = Encoder_Interface_Encode(s->enc_state, s->enc_mode, data, | |||
if (data) { | |||
if (avctx->frame_size < 160) { | |||
flush_buf = av_mallocz(160 * sizeof(*flush_buf)); | |||
if (!flush_buf) | |||
return AVERROR(ENOMEM); | |||
memcpy(flush_buf, samples, avctx->frame_size * sizeof(*flush_buf)); | |||
samples = flush_buf; | |||
if (avctx->frame_size < 110) | |||
s->enc_last_frame = -1; | |||
} | |||
} else { | |||
if (s->enc_last_frame < 0) | |||
return 0; | |||
flush_buf = av_mallocz(160 * sizeof(*flush_buf)); | |||
if (!flush_buf) | |||
return AVERROR(ENOMEM); | |||
samples = flush_buf; | |||
s->enc_last_frame = -1; | |||
} | |||
written = Encoder_Interface_Encode(s->enc_state, s->enc_mode, samples, | |||
frame, 0); | |||
av_dlog(avctx, "amr_nb_encode_frame encoded %u bytes, bitrate %u, first byte was %#02x\n", | |||
written, s->enc_mode, frame[0]); | |||
av_freep(&flush_buf); | |||
return written; | |||
} | |||
@@ -249,6 +274,7 @@ AVCodec ff_libopencore_amrnb_encoder = { | |||
.init = amr_nb_encode_init, | |||
.encode = amr_nb_encode_frame, | |||
.close = amr_nb_encode_close, | |||
.capabilities = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME, | |||
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, | |||
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"), | |||
.priv_class = &class, | |||
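With CODEC_CAP_DELAY now set, callers are expected to drain the encoder at end of stream by passing NULL input until it returns no more data. A minimal application-side sketch against the old avcodec_encode_audio() API (the output file handle and buffer size are illustrative):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Flush a delayed audio encoder: feed NULL samples until nothing is left. */
static int drain_audio_encoder(AVCodecContext *avctx, FILE *out)
{
    uint8_t buf[FF_MIN_BUFFER_SIZE];
    int n;

    if (!(avctx->codec->capabilities & CODEC_CAP_DELAY))
        return 0;
    while ((n = avcodec_encode_audio(avctx, buf, sizeof(buf), NULL)) > 0)
        fwrite(buf, 1, n, out);
    return n < 0 ? n : 0;
}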
@@ -1276,7 +1276,6 @@ static int mpeg_decode_postinit(AVCodecContext *avctx) | |||
* that behave like P-frames. */ | |||
avctx->has_b_frames = !s->low_delay; | |||
assert((avctx->sub_id == 1) == (avctx->codec_id == CODEC_ID_MPEG1VIDEO)); | |||
if (avctx->codec_id == CODEC_ID_MPEG1VIDEO) { | |||
//MPEG-1 fps | |||
avctx->time_base.den = avpriv_frame_rate_tab[s->frame_rate_index].num; | |||
@@ -1420,7 +1419,6 @@ static void mpeg_decode_sequence_extension(Mpeg1Context *s1) | |||
av_dlog(s->avctx, "sequence extension\n"); | |||
s->codec_id = s->avctx->codec_id = CODEC_ID_MPEG2VIDEO; | |||
s->avctx->sub_id = 2; /* indicates MPEG-2 found */ | |||
if (s->avctx->debug & FF_DEBUG_PICT_INFO) | |||
av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n", | |||
@@ -2038,7 +2036,6 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, | |||
s->frame_pred_frame_dct = 1; | |||
s->chroma_format = 1; | |||
s->codec_id = s->avctx->codec_id = CODEC_ID_MPEG1VIDEO; | |||
avctx->sub_id = 1; /* indicates MPEG-1 */ | |||
s->out_format = FMT_MPEG1; | |||
s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER | |||
if (s->flags & CODEC_FLAG_LOW_DELAY) | |||
@@ -2097,12 +2094,10 @@ static int vcr2_init_sequence(AVCodecContext *avctx) | |||
s->chroma_format = 1; | |||
if (s->codec_tag == AV_RL32("BW10")) { | |||
s->codec_id = s->avctx->codec_id = CODEC_ID_MPEG1VIDEO; | |||
avctx->sub_id = 1; /* indicates MPEG-1 */ | |||
} else { | |||
exchange_uv(s); // common init reset pblocks, so we swap them here | |||
s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB | |||
s->codec_id = s->avctx->codec_id = CODEC_ID_MPEG2VIDEO; | |||
avctx->sub_id = 2; /* indicates MPEG-2 */ | |||
} | |||
s1->save_width = s->width; | |||
s1->save_height = s->height; | |||
@@ -1659,7 +1659,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr, | |||
avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO; | |||
if (!avctx->bit_rate) | |||
avctx->bit_rate = s->bit_rate; | |||
avctx->sub_id = s->layer; | |||
if (s->frame_size <= 0 || s->frame_size > buf_size) { | |||
av_log(avctx, AV_LOG_ERROR, "incomplete frame\n"); | |||
@@ -1732,7 +1731,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, | |||
avctx->channels = s->nb_channels; | |||
if (!avctx->bit_rate) | |||
avctx->bit_rate = s->bit_rate; | |||
avctx->sub_id = s->layer; | |||
s->frame_size = len; | |||
@@ -142,6 +142,5 @@ int avpriv_mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_r | |||
*sample_rate = s->sample_rate; | |||
*channels = s->nb_channels; | |||
*bit_rate = s->bit_rate; | |||
avctx->sub_id = s->layer; | |||
return s->frame_size; | |||
} |
@@ -69,7 +69,6 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s, | |||
pc->frame_rate.num = avctx->time_base.num = avpriv_frame_rate_tab[frame_rate_index].den; | |||
avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400; | |||
avctx->codec_id = CODEC_ID_MPEG1VIDEO; | |||
avctx->sub_id = 1; | |||
} | |||
break; | |||
case EXT_START_CODE: | |||
@@ -94,7 +93,6 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s, | |||
avctx->time_base.den = pc->frame_rate.den * (frame_rate_ext_n + 1) * 2; | |||
avctx->time_base.num = pc->frame_rate.num * (frame_rate_ext_d + 1); | |||
avctx->codec_id = CODEC_ID_MPEG2VIDEO; | |||
avctx->sub_id = 2; /* forces MPEG2 */ | |||
} | |||
break; | |||
case 0x8: /* picture coding extension */ | |||
@@ -52,13 +52,11 @@ | |||
typedef struct NellyMoserEncodeContext { | |||
AVCodecContext *avctx; | |||
int last_frame; | |||
int bufsel; | |||
int have_saved; | |||
DSPContext dsp; | |||
FFTContext mdct_ctx; | |||
DECLARE_ALIGNED(32, float, mdct_out)[NELLY_SAMPLES]; | |||
DECLARE_ALIGNED(32, float, in_buff)[NELLY_SAMPLES]; | |||
DECLARE_ALIGNED(32, float, buf)[2][3 * NELLY_BUF_LEN]; ///< sample buffer | |||
DECLARE_ALIGNED(32, float, buf)[3 * NELLY_BUF_LEN]; ///< sample buffer | |||
float (*opt )[NELLY_BANDS]; | |||
uint8_t (*path)[NELLY_BANDS]; | |||
} NellyMoserEncodeContext; | |||
@@ -115,16 +113,17 @@ static const uint8_t quant_lut_offset[8] = { 0, 0, 1, 4, 11, 32, 81, 230 }; | |||
static void apply_mdct(NellyMoserEncodeContext *s) | |||
{ | |||
s->dsp.vector_fmul(s->in_buff, s->buf[s->bufsel], ff_sine_128, NELLY_BUF_LEN); | |||
s->dsp.vector_fmul_reverse(s->in_buff + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN, ff_sine_128, | |||
NELLY_BUF_LEN); | |||
float *in0 = s->buf; | |||
float *in1 = s->buf + NELLY_BUF_LEN; | |||
float *in2 = s->buf + 2 * NELLY_BUF_LEN; | |||
s->dsp.vector_fmul (s->in_buff, in0, ff_sine_128, NELLY_BUF_LEN); | |||
s->dsp.vector_fmul_reverse(s->in_buff + NELLY_BUF_LEN, in1, ff_sine_128, NELLY_BUF_LEN); | |||
s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out, s->in_buff); | |||
s->dsp.vector_fmul(s->buf[s->bufsel] + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN, | |||
ff_sine_128, NELLY_BUF_LEN); | |||
s->dsp.vector_fmul_reverse(s->buf[s->bufsel] + 2 * NELLY_BUF_LEN, s->buf[1 - s->bufsel], ff_sine_128, | |||
NELLY_BUF_LEN); | |||
s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN); | |||
s->dsp.vector_fmul (s->in_buff, in1, ff_sine_128, NELLY_BUF_LEN); | |||
s->dsp.vector_fmul_reverse(s->in_buff + NELLY_BUF_LEN, in2, ff_sine_128, NELLY_BUF_LEN); | |||
s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out + NELLY_BUF_LEN, s->in_buff); | |||
} | |||
static av_cold int encode_end(AVCodecContext *avctx) | |||
@@ -161,6 +160,7 @@ static av_cold int encode_init(AVCodecContext *avctx) | |||
} | |||
avctx->frame_size = NELLY_SAMPLES; | |||
avctx->delay = NELLY_BUF_LEN; | |||
s->avctx = avctx; | |||
if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0) | |||
goto error; | |||
@@ -363,38 +363,33 @@ static void encode_block(NellyMoserEncodeContext *s, unsigned char *output, int | |||
} | |||
flush_put_bits(&pb); | |||
memset(put_bits_ptr(&pb), 0, output + output_size - put_bits_ptr(&pb)); | |||
} | |||
static int encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data) | |||
{ | |||
NellyMoserEncodeContext *s = avctx->priv_data; | |||
const float *samples = data; | |||
int i; | |||
if (s->last_frame) | |||
return 0; | |||
memcpy(s->buf, s->buf + NELLY_SAMPLES, NELLY_BUF_LEN * sizeof(*s->buf)); | |||
if (data) { | |||
memcpy(s->buf[s->bufsel], samples, avctx->frame_size * sizeof(*samples)); | |||
for (i = avctx->frame_size; i < NELLY_SAMPLES; i++) { | |||
s->buf[s->bufsel][i] = 0; | |||
} | |||
s->bufsel = 1 - s->bufsel; | |||
if (!s->have_saved) { | |||
s->have_saved = 1; | |||
return 0; | |||
memcpy(s->buf + NELLY_BUF_LEN, samples, avctx->frame_size * sizeof(*s->buf)); | |||
if (avctx->frame_size < NELLY_SAMPLES) { | |||
memset(s->buf + NELLY_BUF_LEN + avctx->frame_size, 0, | |||
(NELLY_SAMPLES - avctx->frame_size) * sizeof(*s->buf)); | |||
if (avctx->frame_size >= NELLY_BUF_LEN) | |||
s->last_frame = 1; | |||
} | |||
} else { | |||
memset(s->buf[s->bufsel], 0, sizeof(s->buf[0][0]) * NELLY_BUF_LEN); | |||
s->bufsel = 1 - s->bufsel; | |||
memset(s->buf + NELLY_BUF_LEN, 0, NELLY_SAMPLES * sizeof(*s->buf)); | |||
s->last_frame = 1; | |||
} | |||
if (s->have_saved) { | |||
encode_block(s, frame, buf_size); | |||
return NELLY_BLOCK_LEN; | |||
} | |||
return 0; | |||
encode_block(s, frame, buf_size); | |||
return NELLY_BLOCK_LEN; | |||
} | |||
AVCodec ff_nellymoser_encoder = { | |||
@@ -111,7 +111,9 @@ static const AVOption options[]={ | |||
{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"}, | |||
{"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"}, | |||
{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"}, | |||
#if FF_API_SUB_ID | |||
{"sub_id", NULL, OFFSET(sub_id), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX}, | |||
#endif | |||
{"me_method", "set motion estimation method", OFFSET(me_method), AV_OPT_TYPE_INT, {.dbl = ME_EPZS }, INT_MIN, INT_MAX, V|E, "me_method"}, | |||
{"zero", "zero motion estimation (fastest)", 0, AV_OPT_TYPE_CONST, {.dbl = ME_ZERO }, INT_MIN, INT_MAX, V|E, "me_method" }, | |||
{"full", "full motion estimation (slowest)", 0, AV_OPT_TYPE_CONST, {.dbl = ME_FULL }, INT_MIN, INT_MAX, V|E, "me_method" }, | |||
@@ -42,6 +42,67 @@ enum { | |||
PRORES_PROFILE_HQ, | |||
}; | |||
enum { | |||
QUANT_MAT_PROXY = 0, | |||
QUANT_MAT_LT, | |||
QUANT_MAT_STANDARD, | |||
QUANT_MAT_HQ, | |||
QUANT_MAT_DEFAULT, | |||
}; | |||
static const uint8_t prores_quant_matrices[][64] = { | |||
{ // proxy | |||
4, 7, 9, 11, 13, 14, 15, 63, | |||
7, 7, 11, 12, 14, 15, 63, 63, | |||
9, 11, 13, 14, 15, 63, 63, 63, | |||
11, 11, 13, 14, 63, 63, 63, 63, | |||
11, 13, 14, 63, 63, 63, 63, 63, | |||
13, 14, 63, 63, 63, 63, 63, 63, | |||
13, 63, 63, 63, 63, 63, 63, 63, | |||
63, 63, 63, 63, 63, 63, 63, 63, | |||
}, | |||
{ // LT | |||
4, 5, 6, 7, 9, 11, 13, 15, | |||
5, 5, 7, 8, 11, 13, 15, 17, | |||
6, 7, 9, 11, 13, 15, 15, 17, | |||
7, 7, 9, 11, 13, 15, 17, 19, | |||
7, 9, 11, 13, 14, 16, 19, 23, | |||
9, 11, 13, 14, 16, 19, 23, 29, | |||
9, 11, 13, 15, 17, 21, 28, 35, | |||
11, 13, 16, 17, 21, 28, 35, 41, | |||
}, | |||
{ // standard | |||
4, 4, 5, 5, 6, 7, 7, 9, | |||
4, 4, 5, 6, 7, 7, 9, 9, | |||
5, 5, 6, 7, 7, 9, 9, 10, | |||
5, 5, 6, 7, 7, 9, 9, 10, | |||
5, 6, 7, 7, 8, 9, 10, 12, | |||
6, 7, 7, 8, 9, 10, 12, 15, | |||
6, 7, 7, 9, 10, 11, 14, 17, | |||
7, 7, 9, 10, 11, 14, 17, 21, | |||
}, | |||
{ // high quality | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 5, | |||
4, 4, 4, 4, 4, 4, 5, 5, | |||
4, 4, 4, 4, 4, 5, 5, 6, | |||
4, 4, 4, 4, 5, 5, 6, 7, | |||
4, 4, 4, 4, 5, 6, 7, 7, | |||
}, | |||
{ // codec default | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
}, | |||
}; | |||
#define NUM_MB_LIMITS 4 | |||
static const int prores_mb_limits[NUM_MB_LIMITS] = { | |||
1620, // up to 720x576 | |||
@@ -56,7 +117,7 @@ static const struct prores_profile { | |||
int min_quant; | |||
int max_quant; | |||
int br_tab[NUM_MB_LIMITS]; | |||
uint8_t quant[64]; | |||
int quant; | |||
} prores_profile_info[4] = { | |||
{ | |||
.full_name = "proxy", | |||
@@ -64,16 +125,7 @@ static const struct prores_profile { | |||
.min_quant = 4, | |||
.max_quant = 8, | |||
.br_tab = { 300, 242, 220, 194 }, | |||
.quant = { | |||
4, 7, 9, 11, 13, 14, 15, 63, | |||
7, 7, 11, 12, 14, 15, 63, 63, | |||
9, 11, 13, 14, 15, 63, 63, 63, | |||
11, 11, 13, 14, 63, 63, 63, 63, | |||
11, 13, 14, 63, 63, 63, 63, 63, | |||
13, 14, 63, 63, 63, 63, 63, 63, | |||
13, 63, 63, 63, 63, 63, 63, 63, | |||
63, 63, 63, 63, 63, 63, 63, 63, | |||
}, | |||
.quant = QUANT_MAT_PROXY, | |||
}, | |||
{ | |||
.full_name = "LT", | |||
@@ -81,16 +133,7 @@ static const struct prores_profile { | |||
.min_quant = 1, | |||
.max_quant = 9, | |||
.br_tab = { 720, 560, 490, 440 }, | |||
.quant = { | |||
4, 5, 6, 7, 9, 11, 13, 15, | |||
5, 5, 7, 8, 11, 13, 15, 17, | |||
6, 7, 9, 11, 13, 15, 15, 17, | |||
7, 7, 9, 11, 13, 15, 17, 19, | |||
7, 9, 11, 13, 14, 16, 19, 23, | |||
9, 11, 13, 14, 16, 19, 23, 29, | |||
9, 11, 13, 15, 17, 21, 28, 35, | |||
11, 13, 16, 17, 21, 28, 35, 41, | |||
}, | |||
.quant = QUANT_MAT_LT, | |||
}, | |||
{ | |||
.full_name = "standard", | |||
@@ -98,16 +141,7 @@ static const struct prores_profile { | |||
.min_quant = 1, | |||
.max_quant = 6, | |||
.br_tab = { 1050, 808, 710, 632 }, | |||
.quant = { | |||
4, 4, 5, 5, 6, 7, 7, 9, | |||
4, 4, 5, 6, 7, 7, 9, 9, | |||
5, 5, 6, 7, 7, 9, 9, 10, | |||
5, 5, 6, 7, 7, 9, 9, 10, | |||
5, 6, 7, 7, 8, 9, 10, 12, | |||
6, 7, 7, 8, 9, 10, 12, 15, | |||
6, 7, 7, 9, 10, 11, 14, 17, | |||
7, 7, 9, 10, 11, 14, 17, 21, | |||
}, | |||
.quant = QUANT_MAT_STANDARD, | |||
}, | |||
{ | |||
.full_name = "high quality", | |||
@@ -115,16 +149,7 @@ static const struct prores_profile { | |||
.min_quant = 1, | |||
.max_quant = 6, | |||
.br_tab = { 1566, 1216, 1070, 950 }, | |||
.quant = { | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 4, | |||
4, 4, 4, 4, 4, 4, 4, 5, | |||
4, 4, 4, 4, 4, 4, 5, 5, | |||
4, 4, 4, 4, 4, 5, 5, 6, | |||
4, 4, 4, 4, 5, 5, 6, 7, | |||
4, 4, 4, 4, 5, 6, 7, 7, | |||
}, | |||
.quant = QUANT_MAT_HQ, | |||
} | |||
// for 4444 profile bitrate numbers are { 2350, 1828, 1600, 1425 } | |||
}; | |||
@@ -147,6 +172,7 @@ typedef struct ProresContext { | |||
DECLARE_ALIGNED(16, uint16_t, emu_buf)[16*16]; | |||
int16_t quants[MAX_STORED_Q][64]; | |||
int16_t custom_q[64]; | |||
const uint8_t *quant_mat; | |||
ProresDSPContext dsp; | |||
ScanTable scantable; | |||
@@ -159,6 +185,9 @@ typedef struct ProresContext { | |||
int num_planes; | |||
int bits_per_mb; | |||
char *vendor; | |||
int quant_sel; | |||
int frame_size; | |||
int profile; | |||
@@ -373,7 +402,7 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, | |||
} else { | |||
qmat = ctx->custom_q; | |||
for (i = 0; i < 64; i++) | |||
qmat[i] = ctx->profile_info->quant[i] * quant; | |||
qmat[i] = ctx->quant_mat[i] * quant; | |||
} | |||
for (i = 0; i < ctx->num_planes; i++) { | |||
@@ -591,7 +620,7 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic, | |||
} else { | |||
qmat = ctx->custom_q; | |||
for (i = 0; i < 64; i++) | |||
qmat[i] = ctx->profile_info->quant[i] * q; | |||
qmat[i] = ctx->quant_mat[i] * q; | |||
} | |||
for (i = 0; i < ctx->num_planes; i++) { | |||
bits += estimate_slice_plane(ctx, &error, i, | |||
@@ -684,7 +713,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, | |||
tmp = buf; | |||
buf += 2; // frame header size will be stored here | |||
bytestream_put_be16 (&buf, 0); // version 1 | |||
bytestream_put_buffer(&buf, "Lavc", 4); // creator | |||
bytestream_put_buffer(&buf, ctx->vendor, 4); | |||
bytestream_put_be16 (&buf, avctx->width); | |||
bytestream_put_be16 (&buf, avctx->height); | |||
bytestream_put_byte (&buf, ctx->chroma_factor << 6); // frame flags | |||
@@ -694,13 +723,17 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, | |||
bytestream_put_byte (&buf, avctx->colorspace); | |||
bytestream_put_byte (&buf, 0x40); // source format and alpha information | |||
bytestream_put_byte (&buf, 0); // reserved | |||
bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present | |||
// luma quantisation matrix | |||
for (i = 0; i < 64; i++) | |||
bytestream_put_byte(&buf, ctx->profile_info->quant[i]); | |||
// chroma quantisation matrix | |||
for (i = 0; i < 64; i++) | |||
bytestream_put_byte(&buf, ctx->profile_info->quant[i]); | |||
if (ctx->quant_sel != QUANT_MAT_DEFAULT) { | |||
bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present | |||
// luma quantisation matrix | |||
for (i = 0; i < 64; i++) | |||
bytestream_put_byte(&buf, ctx->quant_mat[i]); | |||
// chroma quantisation matrix | |||
for (i = 0; i < 64; i++) | |||
bytestream_put_byte(&buf, ctx->quant_mat[i]); | |||
} else { | |||
bytestream_put_byte (&buf, 0x00); // matrix flags - default matrices are used | |||
} | |||
bytestream_put_be16 (&tmp, buf - orig_buf); // write back frame header size | |||
// picture header | |||
@@ -816,10 +849,25 @@ static av_cold int encode_init(AVCodecContext *avctx) | |||
ctx->slices_width += av_popcount(ctx->mb_width - ctx->slices_width * mps); | |||
ctx->num_slices = ctx->mb_height * ctx->slices_width; | |||
for (i = 0; i < NUM_MB_LIMITS - 1; i++) | |||
if (prores_mb_limits[i] >= ctx->mb_width * ctx->mb_height) | |||
break; | |||
ctx->bits_per_mb = ctx->profile_info->br_tab[i]; | |||
if (ctx->quant_sel == -1) | |||
ctx->quant_mat = prores_quant_matrices[ctx->profile_info->quant]; | |||
else | |||
ctx->quant_mat = prores_quant_matrices[ctx->quant_sel]; | |||
if (strlen(ctx->vendor) != 4) { | |||
av_log(avctx, AV_LOG_ERROR, "vendor ID should be 4 bytes\n"); | |||
return AVERROR_INVALIDDATA; | |||
} | |||
if (!ctx->bits_per_mb) { | |||
for (i = 0; i < NUM_MB_LIMITS - 1; i++) | |||
if (prores_mb_limits[i] >= ctx->mb_width * ctx->mb_height) | |||
break; | |||
ctx->bits_per_mb = ctx->profile_info->br_tab[i]; | |||
} else if (ctx->bits_per_mb < 128) { | |||
av_log(avctx, AV_LOG_ERROR, "too few bits per MB, please set at least 128\n"); | |||
return AVERROR_INVALIDDATA; | |||
} | |||
ctx->frame_size = ctx->num_slices * (2 + 2 * ctx->num_planes | |||
+ (2 * mps * ctx->bits_per_mb) / 8) | |||
@@ -829,7 +877,7 @@ static av_cold int encode_init(AVCodecContext *avctx) | |||
max_quant = ctx->profile_info->max_quant; | |||
for (i = min_quant; i < MAX_STORED_Q; i++) { | |||
for (j = 0; j < 64; j++) | |||
ctx->quants[i][j] = ctx->profile_info->quant[j] * i; | |||
ctx->quants[i][j] = ctx->quant_mat[j] * i; | |||
} | |||
avctx->codec_tag = ctx->profile_info->tag; | |||
@@ -877,6 +925,24 @@ static const AVOption options[] = { | |||
0, 0, VE, "profile" }, | |||
{ "hq", NULL, 0, AV_OPT_TYPE_CONST, { PRORES_PROFILE_HQ }, | |||
0, 0, VE, "profile" }, | |||
{ "vendor", "vendor ID", OFFSET(vendor), | |||
AV_OPT_TYPE_STRING, { .str = "Lavc" }, CHAR_MIN, CHAR_MAX, VE }, | |||
{ "bits_per_mb", "desired bits per macroblock", OFFSET(bits_per_mb), | |||
AV_OPT_TYPE_INT, { 0 }, 0, 8192, VE }, | |||
{ "quant_mat", "quantiser matrix", OFFSET(quant_sel), AV_OPT_TYPE_INT, | |||
{ -1 }, -1, QUANT_MAT_DEFAULT, VE, "quant_mat" }, | |||
{ "auto", NULL, 0, AV_OPT_TYPE_CONST, { -1 }, | |||
0, 0, VE, "quant_mat" }, | |||
{ "proxy", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_PROXY }, | |||
0, 0, VE, "quant_mat" }, | |||
{ "lt", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_LT }, | |||
0, 0, VE, "quant_mat" }, | |||
{ "standard", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_STANDARD }, | |||
0, 0, VE, "quant_mat" }, | |||
{ "hq", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_HQ }, | |||
0, 0, VE, "quant_mat" }, | |||
{ "default", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_DEFAULT }, | |||
0, 0, VE, "quant_mat" }, | |||
{ NULL } | |||
}; | |||
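For illustration, the new private options can be set through the AVOptions API before opening the encoder; which ProRes encoder avcodec_find_encoder() returns depends on how libavcodec was configured, and the values below are only examples:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Configure the ProRes encoder knobs added above ("vendor", "bits_per_mb",
 * "quant_mat") via AVOptions; error checks are omitted for brevity. */
static AVCodecContext *open_prores(int width, int height)
{
    AVCodec *codec      = avcodec_find_encoder(CODEC_ID_PRORES);
    AVCodecContext *ctx = avcodec_alloc_context3(codec);

    ctx->width     = width;
    ctx->height    = height;
    ctx->pix_fmt   = PIX_FMT_YUV422P10;
    ctx->time_base = (AVRational){ 1, 25 };

    av_opt_set    (ctx->priv_data, "profile",     "hq",   0);
    av_opt_set    (ctx->priv_data, "quant_mat",   "hq",   0);
    av_opt_set    (ctx->priv_data, "vendor",      "Lavc", 0);
    av_opt_set_int(ctx->priv_data, "bits_per_mb", 1024,   0);

    return avcodec_open2(ctx, codec, NULL) < 0 ? NULL : ctx;
}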
@@ -416,7 +416,6 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, | |||
int err = 0; | |||
if (dst != src) { | |||
dst->sub_id = src->sub_id; | |||
dst->time_base = src->time_base; | |||
dst->width = src->width; | |||
dst->height = src->height; | |||
@@ -25,15 +25,17 @@ | |||
*/ | |||
#include "avcodec.h" | |||
#include "bytestream.h" | |||
typedef struct QpegContext{ | |||
AVCodecContext *avctx; | |||
AVFrame pic, ref; | |||
uint32_t pal[256]; | |||
GetByteContext buffer; | |||
} QpegContext; | |||
static int qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size, | |||
int stride, int width, int height) | |||
static void qpeg_decode_intra(QpegContext *qctx, uint8_t *dst, | |||
int stride, int width, int height) | |||
{ | |||
int i; | |||
int code; | |||
@@ -46,31 +48,26 @@ static int qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size, | |||
height--; | |||
dst = dst + height * stride; | |||
while((size > 0) && (rows_to_go > 0)) { | |||
code = *src++; | |||
size--; | |||
while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (rows_to_go > 0)) { | |||
code = bytestream2_get_byte(&qctx->buffer); | |||
run = copy = 0; | |||
if(code == 0xFC) /* end-of-picture code */ | |||
break; | |||
if(code >= 0xF8) { /* very long run */ | |||
c0 = *src++; | |||
c1 = *src++; | |||
size -= 2; | |||
c0 = bytestream2_get_byte(&qctx->buffer); | |||
c1 = bytestream2_get_byte(&qctx->buffer); | |||
run = ((code & 0x7) << 16) + (c0 << 8) + c1 + 2; | |||
} else if (code >= 0xF0) { /* long run */ | |||
c0 = *src++; | |||
size--; | |||
c0 = bytestream2_get_byte(&qctx->buffer); | |||
run = ((code & 0xF) << 8) + c0 + 2; | |||
} else if (code >= 0xE0) { /* short run */ | |||
run = (code & 0x1F) + 2; | |||
} else if (code >= 0xC0) { /* very long copy */ | |||
c0 = *src++; | |||
c1 = *src++; | |||
size -= 2; | |||
c0 = bytestream2_get_byte(&qctx->buffer); | |||
c1 = bytestream2_get_byte(&qctx->buffer); | |||
copy = ((code & 0x3F) << 16) + (c0 << 8) + c1 + 1; | |||
} else if (code >= 0x80) { /* long copy */ | |||
c0 = *src++; | |||
size--; | |||
c0 = bytestream2_get_byte(&qctx->buffer); | |||
copy = ((code & 0x7F) << 8) + c0 + 1; | |||
} else { /* short copy */ | |||
copy = code + 1; | |||
@@ -80,8 +77,7 @@ static int qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size, | |||
if(run) { | |||
int p; | |||
p = *src++; | |||
size--; | |||
p = bytestream2_get_byte(&qctx->buffer); | |||
for(i = 0; i < run; i++) { | |||
dst[filled++] = p; | |||
if (filled >= width) { | |||
@@ -93,11 +89,8 @@ static int qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size, | |||
} | |||
} | |||
} else { | |||
size -= copy; | |||
if (size<0) | |||
return AVERROR_INVALIDDATA; | |||
for(i = 0; i < copy; i++) { | |||
dst[filled++] = *src++; | |||
dst[filled++] = bytestream2_get_byte(&qctx->buffer); | |||
if (filled >= width) { | |||
filled = 0; | |||
dst -= stride; | |||
@@ -108,7 +101,6 @@ static int qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size, | |||
} | |||
} | |||
} | |||
return 0; | |||
} | |||
static const int qpeg_table_h[16] = | |||
@@ -117,9 +109,10 @@ static const int qpeg_table_w[16] = | |||
{ 0x00, 0x20, 0x18, 0x08, 0x18, 0x10, 0x20, 0x10, 0x08, 0x10, 0x20, 0x20, 0x08, 0x10, 0x18, 0x04}; | |||
/* Decodes delta frames */ | |||
static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
int stride, int width, int height, | |||
int delta, const uint8_t *ctable, uint8_t *refdata) | |||
static void qpeg_decode_inter(QpegContext *qctx, uint8_t *dst, | |||
int stride, int width, int height, | |||
int delta, const uint8_t *ctable, | |||
uint8_t *refdata) | |||
{ | |||
int i, j; | |||
int code; | |||
@@ -137,13 +130,12 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
height--; | |||
dst = dst + height * stride; | |||
while((size > 0) && (height >= 0)) { | |||
code = *src++; | |||
size--; | |||
while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (height >= 0)) { | |||
code = bytestream2_get_byte(&qctx->buffer); | |||
if(delta) { | |||
/* motion compensation */ | |||
while(size > 0 && (code & 0xF0) == 0xF0) { | |||
while(bytestream2_get_bytes_left(&qctx->buffer) > 0 && (code & 0xF0) == 0xF0) { | |||
if(delta == 1) { | |||
int me_idx; | |||
int me_w, me_h, me_x, me_y; | |||
@@ -156,8 +148,7 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
me_h = qpeg_table_h[me_idx]; | |||
/* extract motion vector */ | |||
corr = *src++; | |||
size--; | |||
corr = bytestream2_get_byte(&qctx->buffer); | |||
val = corr >> 4; | |||
if(val > 7) | |||
@@ -184,8 +175,7 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
} | |||
} | |||
} | |||
code = *src++; | |||
size--; | |||
code = bytestream2_get_byte(&qctx->buffer); | |||
} | |||
} | |||
@@ -195,8 +185,7 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
int p; | |||
code &= 0x1F; | |||
p = *src++; | |||
size--; | |||
p = bytestream2_get_byte(&qctx->buffer); | |||
for(i = 0; i <= code; i++) { | |||
dst[filled++] = p; | |||
if(filled >= width) { | |||
@@ -210,11 +199,11 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
} else if(code >= 0xC0) { /* copy code: 0xC0..0xDF */ | |||
code &= 0x1F; | |||
if(code + 1 > size) | |||
if(code + 1 > bytestream2_get_bytes_left(&qctx->buffer)) | |||
break; | |||
for(i = 0; i <= code; i++) { | |||
dst[filled++] = *src++; | |||
dst[filled++] = bytestream2_get_byte(&qctx->buffer); | |||
if(filled >= width) { | |||
filled = 0; | |||
dst -= stride; | |||
@@ -223,18 +212,17 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
break; | |||
} | |||
} | |||
size -= code + 1; | |||
} else if(code >= 0x80) { /* skip code: 0x80..0xBF */ | |||
int skip; | |||
code &= 0x3F; | |||
/* codes 0x80 and 0x81 are actually escape codes, | |||
skip value minus constant is in the next byte */ | |||
if(!code) { | |||
skip = (*src++) + 64; size--; | |||
} else if(code == 1) { | |||
skip = (*src++) + 320; size--; | |||
} else | |||
if(!code) | |||
skip = bytestream2_get_byte(&qctx->buffer) + 64; | |||
else if(code == 1) | |||
skip = bytestream2_get_byte(&qctx->buffer) + 320; | |||
else | |||
skip = code; | |||
filled += skip; | |||
while( filled >= width) { | |||
@@ -246,8 +234,9 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size, | |||
} | |||
} else { | |||
/* zero code treated as one-pixel skip */ | |||
if(code) | |||
if(code) { | |||
dst[filled++] = ctable[code & 0x7F]; | |||
} | |||
else | |||
filled++; | |||
if(filled >= width) { | |||
@@ -263,8 +252,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
void *data, int *data_size, | |||
AVPacket *avpkt) | |||
{ | |||
const uint8_t *buf = avpkt->data; | |||
int buf_size = avpkt->size; | |||
uint8_t ctable[128]; | |||
QpegContext * const a = avctx->priv_data; | |||
AVFrame * p = &a->pic; | |||
AVFrame * ref= &a->ref; | |||
@@ -272,6 +260,13 @@ static int decode_frame(AVCodecContext *avctx, | |||
int delta, ret = 0; | |||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); | |||
if (avpkt->size < 0x86) { | |||
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); | |||
return AVERROR_INVALIDDATA; | |||
} | |||
bytestream2_init(&a->buffer, avpkt->data, avpkt->size); | |||
if(ref->data[0]) | |||
avctx->release_buffer(avctx, ref); | |||
FFSWAP(AVFrame, *ref, *p); | |||
@@ -282,16 +277,17 @@ static int decode_frame(AVCodecContext *avctx, | |||
return -1; | |||
} | |||
outdata = a->pic.data[0]; | |||
if(buf[0x85] == 0x10) { | |||
ret = qpeg_decode_intra(buf+0x86, outdata, buf_size - 0x86, a->pic.linesize[0], avctx->width, avctx->height); | |||
bytestream2_skip(&a->buffer, 4); | |||
bytestream2_get_buffer(&a->buffer, ctable, 128); | |||
bytestream2_skip(&a->buffer, 1); | |||
delta = bytestream2_get_byte(&a->buffer); | |||
if(delta == 0x10) { | |||
qpeg_decode_intra(a, outdata, a->pic.linesize[0], avctx->width, avctx->height); | |||
} else { | |||
delta = buf[0x85]; | |||
qpeg_decode_inter(buf+0x86, outdata, buf_size - 0x86, a->pic.linesize[0], avctx->width, avctx->height, delta, buf + 4, a->ref.data[0]); | |||
qpeg_decode_inter(a, outdata, a->pic.linesize[0], avctx->width, avctx->height, delta, ctable, a->ref.data[0]); | |||
} | |||
if (ret<0) | |||
return ret; | |||
/* make the palette available on the way out */ | |||
if (pal) { | |||
a->pic.palette_has_changed = 1; | |||
@@ -302,7 +298,7 @@ static int decode_frame(AVCodecContext *avctx, | |||
*data_size = sizeof(AVFrame); | |||
*(AVFrame*)data = a->pic; | |||
return buf_size; | |||
return avpkt->size; | |||
} | |||
static av_cold int decode_init(AVCodecContext *avctx){ | |||
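The bytestream2 reader introduced above carries its own end pointer, so reads past the end of the packet return 0 instead of running off the buffer, which is what removes the manual size bookkeeping. A minimal stand-alone sketch of the same pattern (the copy loop is hypothetical, not the qpeg code):

#include "bytestream.h"

/* Copy at most max_out bytes from a packet through the bounded reader;
 * once the input is exhausted, bytestream2_get_byte() just returns 0. */
static int bounded_copy(const uint8_t *pkt, int pkt_size,
                        uint8_t *out, int max_out)
{
    GetByteContext gb;
    int n = 0;

    bytestream2_init(&gb, pkt, pkt_size);
    while (bytestream2_get_bytes_left(&gb) > 0 && n < max_out)
        out[n++] = bytestream2_get_byte(&gb);
    return n;
}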
@@ -36,6 +36,7 @@ typedef struct { | |||
AVCodecContext *avctx; | |||
AVFrame frame; | |||
LPCContext lpc_ctx; | |||
int last_frame; | |||
unsigned int old_energy; ///< previous frame energy | |||
@@ -53,6 +53,7 @@ static av_cold int ra144_encode_init(AVCodecContext * avctx) | |||
return -1; | |||
} | |||
avctx->frame_size = NBLOCKS * BLOCKSIZE; | |||
avctx->delay = avctx->frame_size; | |||
avctx->bit_rate = 8000; | |||
ractx = avctx->priv_data; | |||
ractx->lpc_coef[0] = ractx->lpc_tables[0]; | |||
@@ -433,7 +434,7 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame, | |||
{ | |||
static const uint8_t sizes[LPC_ORDER] = {64, 32, 32, 16, 16, 8, 8, 8, 8, 4}; | |||
static const uint8_t bit_sizes[LPC_ORDER] = {6, 5, 5, 4, 4, 3, 3, 3, 3, 2}; | |||
RA144Context *ractx; | |||
RA144Context *ractx = avctx->priv_data; | |||
PutBitContext pb; | |||
int32_t lpc_data[NBLOCKS * BLOCKSIZE]; | |||
int32_t lpc_coefs[LPC_ORDER][MAX_LPC_ORDER]; | |||
@@ -445,11 +446,13 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame, | |||
int energy = 0; | |||
int i, idx; | |||
if (ractx->last_frame) | |||
return 0; | |||
if (buf_size < FRAMESIZE) { | |||
av_log(avctx, AV_LOG_ERROR, "output buffer too small\n"); | |||
return 0; | |||
} | |||
ractx = avctx->priv_data; | |||
/** | |||
* Since the LPC coefficients are calculated on a frame centered over the | |||
@@ -462,11 +465,15 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame, | |||
lpc_data[i] = ractx->curr_block[BLOCKSIZE + BLOCKSIZE / 2 + i]; | |||
energy += (lpc_data[i] * lpc_data[i]) >> 4; | |||
} | |||
for (i = 2 * BLOCKSIZE + BLOCKSIZE / 2; i < NBLOCKS * BLOCKSIZE; i++) { | |||
lpc_data[i] = *((int16_t *)data + i - 2 * BLOCKSIZE - BLOCKSIZE / 2) >> | |||
2; | |||
energy += (lpc_data[i] * lpc_data[i]) >> 4; | |||
if (data) { | |||
int j; | |||
for (j = 0; j < avctx->frame_size && i < NBLOCKS * BLOCKSIZE; i++, j++) { | |||
lpc_data[i] = samples[j] >> 2; | |||
energy += (lpc_data[i] * lpc_data[i]) >> 4; | |||
} | |||
} | |||
if (i < NBLOCKS * BLOCKSIZE) | |||
memset(&lpc_data[i], 0, (NBLOCKS * BLOCKSIZE - i) * sizeof(*lpc_data)); | |||
energy = ff_energy_tab[quantize(ff_t_sqrt(energy >> 5) >> 10, ff_energy_tab, | |||
32)]; | |||
@@ -515,8 +522,17 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame, | |||
ractx->old_energy = energy; | |||
ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0]; | |||
FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]); | |||
for (i = 0; i < NBLOCKS * BLOCKSIZE; i++) | |||
ractx->curr_block[i] = samples[i] >> 2; | |||
/* copy input samples to current block for processing in next call */ | |||
i = 0; | |||
if (data) { | |||
for (; i < avctx->frame_size; i++) | |||
ractx->curr_block[i] = samples[i] >> 2; | |||
} else | |||
ractx->last_frame = 1; | |||
memset(&ractx->curr_block[i], 0, | |||
(NBLOCKS * BLOCKSIZE - i) * sizeof(*ractx->curr_block)); | |||
return FRAMESIZE; | |||
} | |||
@@ -529,6 +545,7 @@ AVCodec ff_ra_144_encoder = { | |||
.init = ra144_encode_init, | |||
.encode = ra144_encode_frame, | |||
.close = ra144_encode_close, | |||
.capabilities = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME, | |||
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, | |||
AV_SAMPLE_FMT_NONE }, | |||
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"), | |||
@@ -40,6 +40,11 @@ | |||
#define DC_VLC_BITS 14 //FIXME find a better solution | |||
typedef struct RVDecContext { | |||
MpegEncContext m; | |||
int sub_id; | |||
} RVDecContext; | |||
static const uint16_t rv_lum_code[256] = | |||
{ | |||
0x3e7f, 0x0f00, 0x0f01, 0x0f02, 0x0f03, 0x0f04, 0x0f05, 0x0f06, | |||
@@ -293,8 +298,9 @@ static int rv10_decode_picture_header(MpegEncContext *s) | |||
return mb_count; | |||
} | |||
static int rv20_decode_picture_header(MpegEncContext *s) | |||
static int rv20_decode_picture_header(RVDecContext *rv) | |||
{ | |||
MpegEncContext *s = &rv->m; | |||
int seq, mb_pos, i; | |||
int rpr_bits; | |||
@@ -342,10 +348,10 @@ static int rv20_decode_picture_header(MpegEncContext *s) | |||
return -1; | |||
} | |||
if(RV_GET_MINOR_VER(s->avctx->sub_id) >= 2) | |||
if(RV_GET_MINOR_VER(rv->sub_id) >= 2) | |||
s->loop_filter = get_bits1(&s->gb); | |||
if(RV_GET_MINOR_VER(s->avctx->sub_id) <= 1) | |||
if(RV_GET_MINOR_VER(rv->sub_id) <= 1) | |||
seq = get_bits(&s->gb, 8) << 7; | |||
else | |||
seq = get_bits(&s->gb, 13) << 2; | |||
@@ -410,7 +416,7 @@ static int rv20_decode_picture_header(MpegEncContext *s) | |||
av_log(s->avctx, AV_LOG_DEBUG, "\n");*/ | |||
s->no_rounding= get_bits1(&s->gb); | |||
if(RV_GET_MINOR_VER(s->avctx->sub_id) <= 1 && s->pict_type == AV_PICTURE_TYPE_B) | |||
if(RV_GET_MINOR_VER(rv->sub_id) <= 1 && s->pict_type == AV_PICTURE_TYPE_B) | |||
skip_bits(&s->gb, 5); // binary decoder reads 3+2 bits here but they don't seem to be used | |||
s->f_code = 1; | |||
@@ -435,7 +441,8 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/ | |||
static av_cold int rv10_decode_init(AVCodecContext *avctx) | |||
{ | |||
MpegEncContext *s = avctx->priv_data; | |||
RVDecContext *rv = avctx->priv_data; | |||
MpegEncContext *s = &rv->m; | |||
static int done=0; | |||
int major_ver, minor_ver, micro_ver; | |||
@@ -454,11 +461,11 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx) | |||
s->orig_height= s->height = avctx->coded_height; | |||
s->h263_long_vectors= ((uint8_t*)avctx->extradata)[3] & 1; | |||
avctx->sub_id= AV_RB32((uint8_t*)avctx->extradata + 4); | |||
rv->sub_id = AV_RB32((uint8_t*)avctx->extradata + 4); | |||
major_ver = RV_GET_MAJOR_VER(avctx->sub_id); | |||
minor_ver = RV_GET_MINOR_VER(avctx->sub_id); | |||
micro_ver = RV_GET_MICRO_VER(avctx->sub_id); | |||
major_ver = RV_GET_MAJOR_VER(rv->sub_id); | |||
minor_ver = RV_GET_MINOR_VER(rv->sub_id); | |||
micro_ver = RV_GET_MICRO_VER(rv->sub_id); | |||
s->low_delay = 1; | |||
switch (major_ver) { | |||
@@ -473,13 +480,13 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx) | |||
} | |||
break; | |||
default: | |||
av_log(s->avctx, AV_LOG_ERROR, "unknown header %X\n", avctx->sub_id); | |||
av_log(s->avctx, AV_LOG_ERROR, "unknown header %X\n", rv->sub_id); | |||
av_log_missing_feature(avctx, "RV1/2 version", 1); | |||
return AVERROR_PATCHWELCOME; | |||
} | |||
if(avctx->debug & FF_DEBUG_PICT_INFO){ | |||
av_log(avctx, AV_LOG_DEBUG, "ver:%X ver0:%X\n", avctx->sub_id, avctx->extradata_size >= 4 ? ((uint32_t*)avctx->extradata)[0] : -1); | |||
av_log(avctx, AV_LOG_DEBUG, "ver:%X ver0:%X\n", rv->sub_id, avctx->extradata_size >= 4 ? ((uint32_t*)avctx->extradata)[0] : -1); | |||
} | |||
avctx->pix_fmt = PIX_FMT_YUV420P; | |||
@@ -514,7 +521,8 @@ static av_cold int rv10_decode_end(AVCodecContext *avctx) | |||
static int rv10_decode_packet(AVCodecContext *avctx, | |||
const uint8_t *buf, int buf_size, int buf_size2) | |||
{ | |||
MpegEncContext *s = avctx->priv_data; | |||
RVDecContext *rv = avctx->priv_data; | |||
MpegEncContext *s = &rv->m; | |||
int mb_count, mb_pos, left, start_mb_x, active_bits_size; | |||
active_bits_size = buf_size * 8; | |||
@@ -522,7 +530,7 @@ static int rv10_decode_packet(AVCodecContext *avctx, | |||
if(s->codec_id ==CODEC_ID_RV10) | |||
mb_count = rv10_decode_picture_header(s); | |||
else | |||
mb_count = rv20_decode_picture_header(s); | |||
mb_count = rv20_decode_picture_header(rv); | |||
if (mb_count < 0) { | |||
av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n"); | |||
return -1; | |||
@@ -733,7 +741,7 @@ AVCodec ff_rv10_decoder = { | |||
.name = "rv10", | |||
.type = AVMEDIA_TYPE_VIDEO, | |||
.id = CODEC_ID_RV10, | |||
.priv_data_size = sizeof(MpegEncContext), | |||
.priv_data_size = sizeof(RVDecContext), | |||
.init = rv10_decode_init, | |||
.close = rv10_decode_end, | |||
.decode = rv10_decode_frame, | |||
@@ -747,7 +755,7 @@ AVCodec ff_rv20_decoder = { | |||
.name = "rv20", | |||
.type = AVMEDIA_TYPE_VIDEO, | |||
.id = CODEC_ID_RV20, | |||
.priv_data_size = sizeof(MpegEncContext), | |||
.priv_data_size = sizeof(RVDecContext), | |||
.init = rv10_decode_init, | |||
.close = rv10_decode_end, | |||
.decode = rv10_decode_frame, | |||
@@ -118,12 +118,12 @@ static void avcodec_init(void) | |||
ff_dsputil_static_init(); | |||
} | |||
static av_always_inline int codec_is_encoder(AVCodec *codec) | |||
int av_codec_is_encoder(AVCodec *codec) | |||
{ | |||
return codec && (codec->encode || codec->encode2); | |||
} | |||
static av_always_inline int codec_is_decoder(AVCodec *codec) | |||
int av_codec_is_decoder(AVCodec *codec) | |||
{ | |||
return codec && codec->decode; | |||
} | |||
@@ -798,7 +798,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD | |||
/* if the decoder init function was already called previously, | |||
free the already allocated subtitle_header before overwriting it */ | |||
if (codec_is_decoder(codec)) | |||
if (av_codec_is_decoder(codec)) | |||
av_freep(&avctx->subtitle_header); | |||
#define SANE_NB_CHANNELS 128U | |||
@@ -845,7 +845,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD | |||
ret = AVERROR(EINVAL); | |||
goto free_and_end; | |||
} | |||
if (codec_is_encoder(avctx->codec)) { | |||
if (av_codec_is_encoder(avctx->codec)) { | |||
int i; | |||
if (avctx->codec->sample_fmts) { | |||
for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) | |||
@@ -914,7 +914,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD | |||
} | |||
} | |||
if (codec_is_decoder(avctx->codec) && !avctx->bit_rate) | |||
if (av_codec_is_decoder(avctx->codec) && !avctx->bit_rate) | |||
avctx->bit_rate = get_bit_rate(avctx); | |||
ret=0; | |||
@@ -1527,7 +1527,7 @@ av_cold int avcodec_close(AVCodecContext *avctx) | |||
av_opt_free(avctx->priv_data); | |||
av_opt_free(avctx); | |||
av_freep(&avctx->priv_data); | |||
if (codec_is_encoder(avctx->codec)) | |||
if (av_codec_is_encoder(avctx->codec)) | |||
av_freep(&avctx->extradata); | |||
avctx->codec = NULL; | |||
avctx->active_thread_type = 0; | |||
@@ -1556,7 +1556,7 @@ AVCodec *avcodec_find_encoder(enum CodecID id) | |||
p = first_avcodec; | |||
id= remap_deprecated_codec_id(id); | |||
while (p) { | |||
if (codec_is_encoder(p) && p->id == id) { | |||
if (av_codec_is_encoder(p) && p->id == id) { | |||
if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) { | |||
experimental = p; | |||
} else | |||
@@ -1574,7 +1574,7 @@ AVCodec *avcodec_find_encoder_by_name(const char *name) | |||
return NULL; | |||
p = first_avcodec; | |||
while (p) { | |||
if (codec_is_encoder(p) && strcmp(name,p->name) == 0) | |||
if (av_codec_is_encoder(p) && strcmp(name,p->name) == 0) | |||
return p; | |||
p = p->next; | |||
} | |||
@@ -1587,7 +1587,7 @@ AVCodec *avcodec_find_decoder(enum CodecID id) | |||
p = first_avcodec; | |||
id= remap_deprecated_codec_id(id); | |||
while (p) { | |||
if (codec_is_decoder(p) && p->id == id) { | |||
if (av_codec_is_decoder(p) && p->id == id) { | |||
if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) { | |||
experimental = p; | |||
} else | |||
@@ -1605,7 +1605,7 @@ AVCodec *avcodec_find_decoder_by_name(const char *name) | |||
return NULL; | |||
p = first_avcodec; | |||
while (p) { | |||
if (codec_is_decoder(p) && strcmp(name,p->name) == 0) | |||
if (av_codec_is_decoder(p) && strcmp(name,p->name) == 0) | |||
return p; | |||
p = p->next; | |||
} | |||
@@ -21,7 +21,7 @@ | |||
#define AVCODEC_VERSION_H | |||
#define LIBAVCODEC_VERSION_MAJOR 54 | |||
#define LIBAVCODEC_VERSION_MINOR 7 | |||
#define LIBAVCODEC_VERSION_MINOR 8 | |||
#define LIBAVCODEC_VERSION_MICRO 100 | |||
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ | |||
@@ -69,5 +69,8 @@ | |||
#ifndef FF_API_INTER_THRESHOLD | |||
#define FF_API_INTER_THRESHOLD (LIBAVCODEC_VERSION_MAJOR < 55) | |||
#endif | |||
#ifndef FF_API_SUB_ID | |||
#define FF_API_SUB_ID (LIBAVCODEC_VERSION_MAJOR < 55) | |||
#endif | |||
#endif /* AVCODEC_VERSION_H */ |
@@ -137,13 +137,16 @@ typedef struct { | |||
#define RESIDUE_PART_SIZE 32 | |||
#define NUM_RESIDUE_PARTITIONS (RESIDUE_SIZE/RESIDUE_PART_SIZE) | |||
static inline void put_codeword(PutBitContext *pb, vorbis_enc_codebook *cb, | |||
int entry) | |||
static inline int put_codeword(PutBitContext *pb, vorbis_enc_codebook *cb, | |||
int entry) | |||
{ | |||
assert(entry >= 0); | |||
assert(entry < cb->nentries); | |||
assert(cb->lens[entry]); | |||
if (pb->size_in_bits - put_bits_count(pb) < cb->lens[entry]) | |||
return AVERROR(EINVAL); | |||
put_bits(pb, cb->lens[entry], cb->codewords[entry]); | |||
return 0; | |||
} | |||
static int cb_lookup_vals(int lookup, int dimentions, int entries) | |||
@@ -751,14 +754,16 @@ static int render_point(int x0, int y0, int x1, int y1, int x) | |||
return y0 + (x - x0) * (y1 - y0) / (x1 - x0); | |||
} | |||
static void floor_encode(vorbis_enc_context *venc, vorbis_enc_floor *fc, | |||
PutBitContext *pb, uint16_t *posts, | |||
float *floor, int samples) | |||
static int floor_encode(vorbis_enc_context *venc, vorbis_enc_floor *fc, | |||
PutBitContext *pb, uint16_t *posts, | |||
float *floor, int samples) | |||
{ | |||
int range = 255 / fc->multiplier + 1; | |||
int coded[MAX_FLOOR_VALUES]; // first 2 values are unused | |||
int i, counter; | |||
if (pb->size_in_bits - put_bits_count(pb) < 1 + 2 * ilog(range - 1)) | |||
return AVERROR(EINVAL); | |||
put_bits(pb, 1, 1); // non zero | |||
put_bits(pb, ilog(range - 1), posts[0]); | |||
put_bits(pb, ilog(range - 1), posts[1]); | |||
@@ -816,7 +821,8 @@ static void floor_encode(vorbis_enc_context *venc, vorbis_enc_floor *fc, | |||
cval |= l << cshift; | |||
cshift += c->subclass; | |||
} | |||
put_codeword(pb, book, cval); | |||
if (put_codeword(pb, book, cval)) | |||
return AVERROR(EINVAL); | |||
} | |||
for (k = 0; k < c->dim; k++) { | |||
int book = c->books[cval & (csub-1)]; | |||
@@ -826,12 +832,15 @@ static void floor_encode(vorbis_enc_context *venc, vorbis_enc_floor *fc, | |||
continue; | |||
if (entry == -1) | |||
entry = 0; | |||
put_codeword(pb, &venc->codebooks[book], entry); | |||
if (put_codeword(pb, &venc->codebooks[book], entry)) | |||
return AVERROR(EINVAL); | |||
} | |||
} | |||
ff_vorbis_floor1_render_list(fc->list, fc->values, posts, coded, | |||
fc->multiplier, floor, samples); | |||
return 0; | |||
} | |||
static float *put_vector(vorbis_enc_codebook *book, PutBitContext *pb, | |||
@@ -852,13 +861,14 @@ static float *put_vector(vorbis_enc_codebook *book, PutBitContext *pb, | |||
distance = d; | |||
} | |||
} | |||
put_codeword(pb, book, entry); | |||
if (put_codeword(pb, book, entry)) | |||
return NULL; | |||
return &book->dimentions[entry * book->ndimentions]; | |||
} | |||
static void residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, | |||
PutBitContext *pb, float *coeffs, int samples, | |||
int real_ch) | |||
static int residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, | |||
PutBitContext *pb, float *coeffs, int samples, | |||
int real_ch) | |||
{ | |||
int pass, i, j, p, k; | |||
int psize = rc->partition_size; | |||
@@ -894,7 +904,8 @@ static void residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, | |||
entry *= rc->classifications; | |||
entry += classes[j][p + i]; | |||
} | |||
put_codeword(pb, book, entry); | |||
if (put_codeword(pb, book, entry)) | |||
return AVERROR(EINVAL); | |||
} | |||
for (i = 0; i < classwords && p < partitions; i++, p++) { | |||
for (j = 0; j < channels; j++) { | |||
@@ -909,8 +920,10 @@ static void residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, | |||
if (rc->type == 0) { | |||
for (k = 0; k < psize; k += book->ndimentions) { | |||
float *a = put_vector(book, pb, &buf[k]); | |||
int l; | |||
float *a = put_vector(book, pb, &buf[k]); | |||
if (!a) | |||
return AVERROR(EINVAL); | |||
for (l = 0; l < book->ndimentions; l++) | |||
buf[k + l] -= a[l]; | |||
} | |||
@@ -930,6 +943,8 @@ static void residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, | |||
} | |||
} | |||
pv = put_vector(book, pb, vec); | |||
if (!pv) | |||
return AVERROR(EINVAL); | |||
for (dim = book->ndimentions; dim--; ) { | |||
coeffs[a1 + b1] -= *pv++; | |||
if ((a1 += samples) == s) { | |||
@@ -943,6 +958,7 @@ static void residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, | |||
} | |||
} | |||
} | |||
return 0; | |||
} | |||
static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *audio, | |||
@@ -1016,6 +1032,11 @@ static int vorbis_encode_frame(AVCodecContext *avccontext, | |||
init_put_bits(&pb, packets, buf_size); | |||
if (pb.size_in_bits - put_bits_count(&pb) < 1 + ilog(venc->nmodes - 1)) { | |||
av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n"); | |||
return AVERROR(EINVAL); | |||
} | |||
put_bits(&pb, 1, 0); // magic bit | |||
put_bits(&pb, ilog(venc->nmodes - 1), 0); // 0 bits, the mode | |||
@@ -1031,7 +1052,10 @@ static int vorbis_encode_frame(AVCodecContext *avccontext, | |||
vorbis_enc_floor *fc = &venc->floors[mapping->floor[mapping->mux[i]]]; | |||
uint16_t posts[MAX_FLOOR_VALUES]; | |||
floor_fit(venc, fc, &venc->coeffs[i * samples], posts, samples); | |||
floor_encode(venc, fc, &pb, posts, &venc->floor[i * samples], samples); | |||
if (floor_encode(venc, fc, &pb, posts, &venc->floor[i * samples], samples)) { | |||
av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n"); | |||
return AVERROR(EINVAL); | |||
} | |||
} | |||
for (i = 0; i < venc->channels * samples; i++) | |||
@@ -1051,8 +1075,11 @@ static int vorbis_encode_frame(AVCodecContext *avccontext, | |||
} | |||
} | |||
residue_encode(venc, &venc->residues[mapping->residue[mapping->mux[0]]], | |||
&pb, venc->coeffs, samples, venc->channels); | |||
if (residue_encode(venc, &venc->residues[mapping->residue[mapping->mux[0]]], | |||
&pb, venc->coeffs, samples, venc->channels)) { | |||
av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n"); | |||
return AVERROR(EINVAL); | |||
} | |||
avccontext->coded_frame->pts = venc->sample_count; | |||
venc->sample_count += avccontext->frame_size; | |||
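The guard used throughout this patch is the same in every spot: compare the free space in the PutBitContext against the number of bits about to be written, and bail out with AVERROR(EINVAL) instead of overrunning the packet. A condensed sketch of that pattern (the wrapper name is illustrative):

#include "put_bits.h"
#include "libavutil/error.h"

/* Write 'n' bits of 'value' only when the output buffer still has room;
 * this mirrors the overwrite check now done before each put_bits() call. */
static inline int put_bits_checked(PutBitContext *pb, int n, unsigned value)
{
    if (pb->size_in_bits - put_bits_count(pb) < n)
        return AVERROR(EINVAL);
    put_bits(pb, n, value);
    return 0;
}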
@@ -39,6 +39,12 @@ static int encode_init(AVCodecContext * avctx){ | |||
return AVERROR(EINVAL); | |||
} | |||
if (avctx->sample_rate > 48000) { | |||
av_log(avctx, AV_LOG_ERROR, "sample rate is too high: %d > 48kHz", | |||
avctx->sample_rate); | |||
return AVERROR(EINVAL); | |||
} | |||
if(avctx->bit_rate < 24*1000) { | |||
av_log(avctx, AV_LOG_ERROR, "bitrate too low: got %i, need 24000 or higher\n", | |||
avctx->bit_rate); | |||
@@ -64,6 +70,8 @@ static int encode_init(AVCodecContext * avctx){ | |||
s->use_exp_vlc = flags2 & 0x0001; | |||
s->use_bit_reservoir = flags2 & 0x0002; | |||
s->use_variable_block_len = flags2 & 0x0004; | |||
if (avctx->channels == 2) | |||
s->ms_stereo = 1; | |||
ff_wma_init(avctx, flags2); | |||
@@ -71,8 +79,12 @@ static int encode_init(AVCodecContext * avctx){ | |||
for(i = 0; i < s->nb_block_sizes; i++) | |||
ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0); | |||
avctx->block_align= | |||
s->block_align= avctx->bit_rate*(int64_t)s->frame_len / (avctx->sample_rate*8); | |||
s->block_align = avctx->bit_rate * (int64_t)s->frame_len / | |||
(avctx->sample_rate * 8); | |||
s->block_align = FFMIN(s->block_align, MAX_CODED_SUPERFRAME_SIZE); | |||
avctx->block_align = s->block_align; | |||
avctx->bit_rate = avctx->block_align * 8LL * avctx->sample_rate / | |||
s->frame_len; | |||
//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d\n", s->block_align, avctx->bit_rate, s->frame_len, avctx->sample_rate); | |||
avctx->frame_size= s->frame_len; | |||
@@ -181,7 +193,7 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE], | |||
} | |||
if (s->nb_channels == 2) { | |||
put_bits(&s->pb, 1, s->ms_stereo= 1); | |||
put_bits(&s->pb, 1, !!s->ms_stereo); | |||
} | |||
for(ch = 0; ch < s->nb_channels; ch++) { | |||
@@ -355,6 +367,11 @@ static int encode_superframe(AVCodecContext *avctx, | |||
} | |||
} | |||
if (buf_size < 2 * MAX_CODED_SUPERFRAME_SIZE) { | |||
av_log(avctx, AV_LOG_ERROR, "output buffer size is too small\n"); | |||
return AVERROR(EINVAL); | |||
} | |||
#if 1 | |||
total_gain= 128; | |||
for(i=64; i; i>>=1){ | |||
@@ -379,15 +396,17 @@ static int encode_superframe(AVCodecContext *avctx, | |||
} | |||
#endif | |||
encode_frame(s, s->coefs, buf, buf_size, total_gain); | |||
if ((i = encode_frame(s, s->coefs, buf, buf_size, total_gain)) >= 0) { | |||
av_log(avctx, AV_LOG_ERROR, "required frame size too large. please " | |||
"use a higher bit rate.\n"); | |||
return AVERROR(EINVAL); | |||
} | |||
assert((put_bits_count(&s->pb) & 7) == 0); | |||
i= s->block_align - (put_bits_count(&s->pb)+7)/8; | |||
assert(i>=0); | |||
while(i--) | |||
while (i++) | |||
put_bits(&s->pb, 8, 'N'); | |||
flush_put_bits(&s->pb); | |||
return put_bits_ptr(&s->pb) - s->pb.buf; | |||
return s->block_align; | |||
} | |||
AVCodec ff_wmav1_encoder = { | |||
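As a worked example with illustrative numbers (not taken from the patch): for a requested bit_rate of 128000, sample_rate 44100 and frame_len 2048, block_align = 128000 * 2048 / (44100 * 8) = 743 bytes; that is well below MAX_CODED_SUPERFRAME_SIZE, so the clamp does not trigger, and the advertised bit_rate is recomputed as 743 * 8 * 44100 / 2048 ≈ 127993 bit/s, i.e. the caller now sees the rate the encoder can actually honour after rounding to whole blocks.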
@@ -29,16 +29,16 @@ | |||
/* | |||
* MC functions | |||
*/ | |||
extern void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride, | |||
extern void ff_put_vp8_epel4_h4_mmx2 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
extern void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride, | |||
extern void ff_put_vp8_epel4_h6_mmx2 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
extern void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride, | |||
extern void ff_put_vp8_epel4_v4_mmx2 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
extern void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride, | |||
extern void ff_put_vp8_epel4_v6_mmx2 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
@@ -80,7 +80,7 @@ extern void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
extern void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride, | |||
extern void ff_put_vp8_bilinear4_h_mmx2 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
extern void ff_put_vp8_bilinear8_h_sse2 (uint8_t *dst, ptrdiff_t dststride, | |||
@@ -93,7 +93,7 @@ extern void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
extern void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride, | |||
extern void ff_put_vp8_bilinear4_v_mmx2 (uint8_t *dst, ptrdiff_t dststride, | |||
uint8_t *src, ptrdiff_t srcstride, | |||
int height, int mx, int my); | |||
extern void ff_put_vp8_bilinear8_v_sse2 (uint8_t *dst, ptrdiff_t dststride, | |||
@@ -139,27 +139,27 @@ static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \ | |||
} | |||
#if ARCH_X86_32 | |||
TAP_W8 (mmxext, epel, h4) | |||
TAP_W8 (mmxext, epel, h6) | |||
TAP_W16(mmxext, epel, h6) | |||
TAP_W8 (mmxext, epel, v4) | |||
TAP_W8 (mmxext, epel, v6) | |||
TAP_W16(mmxext, epel, v6) | |||
TAP_W8 (mmxext, bilinear, h) | |||
TAP_W16(mmxext, bilinear, h) | |||
TAP_W8 (mmxext, bilinear, v) | |||
TAP_W16(mmxext, bilinear, v) | |||
TAP_W8 (mmx2, epel, h4) | |||
TAP_W8 (mmx2, epel, h6) | |||
TAP_W16(mmx2, epel, h6) | |||
TAP_W8 (mmx2, epel, v4) | |||
TAP_W8 (mmx2, epel, v6) | |||
TAP_W16(mmx2, epel, v6) | |||
TAP_W8 (mmx2, bilinear, h) | |||
TAP_W16(mmx2, bilinear, h) | |||
TAP_W8 (mmx2, bilinear, v) | |||
TAP_W16(mmx2, bilinear, v) | |||
#endif | |||
TAP_W16(sse2, epel, h6) | |||
TAP_W16(sse2, epel, v6) | |||
TAP_W16(sse2, bilinear, h) | |||
TAP_W16(sse2, bilinear, v) | |||
TAP_W16(sse2, epel, h6) | |||
TAP_W16(sse2, epel, v6) | |||
TAP_W16(sse2, bilinear, h) | |||
TAP_W16(sse2, bilinear, v) | |||
TAP_W16(ssse3, epel, h6) | |||
TAP_W16(ssse3, epel, v6) | |||
TAP_W16(ssse3, bilinear, h) | |||
TAP_W16(ssse3, bilinear, v) | |||
TAP_W16(ssse3, epel, h6) | |||
TAP_W16(ssse3, epel, v6) | |||
TAP_W16(ssse3, bilinear, h) | |||
TAP_W16(ssse3, bilinear, v) | |||
#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \ | |||
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \ | |||
@@ -177,13 +177,13 @@ static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT | |||
#if ARCH_X86_32 | |||
#define HVTAPMMX(x, y) \ | |||
HVTAP(mmxext, 8, x, y, 4, 8) \ | |||
HVTAP(mmxext, 8, x, y, 8, 16) | |||
HVTAP(mmx2, 8, x, y, 4, 8) \ | |||
HVTAP(mmx2, 8, x, y, 8, 16) | |||
HVTAP(mmxext, 8, 6, 6, 16, 16) | |||
HVTAP(mmx2, 8, 6, 6, 16, 16) | |||
#else | |||
#define HVTAPMMX(x, y) \ | |||
HVTAP(mmxext, 8, x, y, 4, 8) | |||
HVTAP(mmx2, 8, x, y, 4, 8) | |||
#endif | |||
HVTAPMMX(4, 4) | |||
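The HVTAP/HVTAPMMX wrappers above build the two-dimensional subpel filters out of the one-dimensional kernels: filter horizontally into a temporary buffer (with a few extra rows of context), then filter that buffer vertically. A rough C sketch of the pattern for the 6-tap, width-8 case, with illustrative names (the real macros parameterize size, tap count and alignment):

    #include <stdint.h>
    #include <stddef.h>

    typedef void (*epel_fn)(uint8_t *dst, ptrdiff_t dststride,
                            uint8_t *src, ptrdiff_t srcstride,
                            int height, int mx, int my);

    /* Hypothetical h6v6 wrapper for an 8-pixel-wide block, max height 16. */
    static void put_epel8_h6v6_sketch(uint8_t *dst, ptrdiff_t dststride,
                                      uint8_t *src, ptrdiff_t srcstride,
                                      int height, int mx, int my,
                                      epel_fn h6, epel_fn v6)
    {
        uint8_t tmp[8 * (16 + 5)];          /* width * (max height + taps - 1) */

        /* Horizontal pass, starting 2 rows early so the vertical 6-tap
         * filter has its top context available in tmp. */
        h6(tmp, 8, src - 2 * srcstride, srcstride, height + 5, mx, my);
        /* Vertical pass from the temporary buffer into the destination. */
        v6(dst, dststride, tmp + 2 * 8, 8, height, mx, my);
    }
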
@@ -218,16 +218,16 @@ static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \ | |||
dst, dststride, tmp, SIZE, height, mx, my); \ | |||
} | |||
HVBILIN(mmxext, 8, 4, 8) | |||
HVBILIN(mmx2, 8, 4, 8) | |||
#if ARCH_X86_32 | |||
HVBILIN(mmxext, 8, 8, 16) | |||
HVBILIN(mmxext, 8, 16, 16) | |||
HVBILIN(mmx2, 8, 8, 16) | |||
HVBILIN(mmx2, 8, 16, 16) | |||
#endif | |||
HVBILIN(sse2, 8, 8, 16) | |||
HVBILIN(sse2, 8, 16, 16) | |||
HVBILIN(ssse3, 8, 4, 8) | |||
HVBILIN(ssse3, 8, 8, 16) | |||
HVBILIN(ssse3, 8, 16, 16) | |||
extern void ff_vp8_idct_dc_add_mmx(uint8_t *dst, DCTELEM block[16], | |||
ptrdiff_t stride); | |||
@@ -283,7 +283,7 @@ extern void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \ | |||
int e, int i, int hvt); | |||
DECLARE_LOOP_FILTER(mmx) | |||
DECLARE_LOOP_FILTER(mmxext) | |||
DECLARE_LOOP_FILTER(mmx2) | |||
DECLARE_LOOP_FILTER(sse2) | |||
DECLARE_LOOP_FILTER(ssse3) | |||
DECLARE_LOOP_FILTER(sse4) | |||
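DECLARE_LOOP_FILTER above stamps out one set of loop-filter prototypes per instruction-set suffix via token pasting. A trimmed-down C illustration of that pattern (simplified signature, illustrative names, not the real libavcodec declarations):

    #include <stdint.h>
    #include <stddef.h>

    /* One macro invocation declares a whole per-optimization family. */
    #define DECLARE_SIMPLE_LF(NAME)                                   \
        void sketch_v_loop_filter_simple_ ## NAME(uint8_t *dst,       \
                                                  ptrdiff_t stride,   \
                                                  int flim);          \
        void sketch_h_loop_filter_simple_ ## NAME(uint8_t *dst,       \
                                                  ptrdiff_t stride,   \
                                                  int flim);

    DECLARE_SIMPLE_LF(mmx)
    DECLARE_SIMPLE_LF(sse2)
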
@@ -351,26 +351,26 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) | |||
/* note that 4-tap width=16 functions are missing because w=16 | |||
* is only used for luma, and luma is always a copy or sixtap. */ | |||
if (mm_flags & AV_CPU_FLAG_MMX2) { | |||
VP8_MC_FUNC(2, 4, mmxext); | |||
VP8_BILINEAR_MC_FUNC(2, 4, mmxext); | |||
VP8_MC_FUNC(2, 4, mmx2); | |||
VP8_BILINEAR_MC_FUNC(2, 4, mmx2); | |||
#if ARCH_X86_32 | |||
VP8_LUMA_MC_FUNC(0, 16, mmxext); | |||
VP8_MC_FUNC(1, 8, mmxext); | |||
VP8_BILINEAR_MC_FUNC(0, 16, mmxext); | |||
VP8_BILINEAR_MC_FUNC(1, 8, mmxext); | |||
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; | |||
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; | |||
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext; | |||
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext; | |||
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext; | |||
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext; | |||
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext; | |||
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext; | |||
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext; | |||
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext; | |||
VP8_LUMA_MC_FUNC(0, 16, mmx2); | |||
VP8_MC_FUNC(1, 8, mmx2); | |||
VP8_BILINEAR_MC_FUNC(0, 16, mmx2); | |||
VP8_BILINEAR_MC_FUNC(1, 8, mmx2); | |||
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx2; | |||
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx2; | |||
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx2; | |||
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx2; | |||
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx2; | |||
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx2; | |||
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx2; | |||
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx2; | |||
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx2; | |||
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx2; | |||
#endif | |||
} | |||
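The init function above follows the usual libavcodec runtime-dispatch pattern: query the CPU once, then overwrite the portable function pointers with the fastest implementation available. A self-contained sketch of that pattern (the struct and symbols are illustrative, not the real VP8DSPContext members):

    #include <stdint.h>
    #include <stddef.h>
    #include <libavutil/cpu.h>      /* av_get_cpu_flags(), AV_CPU_FLAG_* */

    typedef void (*loop_filter_fn)(uint8_t *dst, ptrdiff_t stride,
                                   int flim_e, int flim_i, int hev_thresh);

    struct sketch_dsp {
        loop_filter_fn v_loop_filter_simple;
    };

    /* Start from the C fallback, then upgrade when the CPU allows it. */
    static void sketch_dsp_init(struct sketch_dsp *c,
                                loop_filter_fn lf_c,
                                loop_filter_fn lf_mmx2,
                                loop_filter_fn lf_sse2)
    {
        int flags = av_get_cpu_flags();

        c->v_loop_filter_simple = lf_c;
        if (flags & AV_CPU_FLAG_MMX2)
            c->v_loop_filter_simple = lf_mmx2;
        if (flags & AV_CPU_FLAG_SSE2)
            c->v_loop_filter_simple = lf_sse2;
    }
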
@@ -251,6 +251,8 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9 | |||
%endrep | |||
%endif | |||
%xdefine %%stack_offset stack_offset | |||
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine | |||
%assign %%i 0 | |||
%rep %0 | |||
%xdefine %1q r %+ %%i %+ q | |||
@@ -262,7 +264,8 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9 | |||
%assign %%i %%i+1 | |||
%rotate 1 | |||
%endrep | |||
%assign n_arg_names %%i | |||
%xdefine stack_offset %%stack_offset | |||
%assign n_arg_names %0 | |||
%endmacro | |||
%if WIN64 ; Windows x64 ;================================================= | |||
@@ -34,7 +34,7 @@ static const char *sws_context_to_name(void *ptr) | |||
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM | |||
static const AVOption options[] = { | |||
{ "sws_flags", "scaler/cpu flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, { .dbl = DEFAULT }, 0, UINT_MAX, VE, "sws_flags" }, | |||
{ "sws_flags", "scaler flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, { .dbl = DEFAULT }, 0, UINT_MAX, VE, "sws_flags" }, | |||
{ "fast_bilinear", "fast bilinear", 0, AV_OPT_TYPE_CONST, { .dbl = SWS_FAST_BILINEAR }, INT_MIN, INT_MAX, VE, "sws_flags" }, | |||
{ "bilinear", "bilinear", 0, AV_OPT_TYPE_CONST, { .dbl = SWS_BILINEAR }, INT_MIN, INT_MAX, VE, "sws_flags" }, | |||
{ "bicubic", "bicubic", 0, AV_OPT_TYPE_CONST, { .dbl = SWS_BICUBIC }, INT_MIN, INT_MAX, VE, "sws_flags" }, | |||
@@ -131,8 +131,12 @@ cglobal yuv2planeX_%1, %3, 7, %2, filter, fltsize, src, dst, w, dither, offset | |||
; pixels per iteration. In order to not have to keep track of where | |||
; we are w.r.t. dithering, we unroll the mmx/8bit loop x2. | |||
%if %1 == 8 | |||
%rep 16/mmsize | |||
%endif ; %1 == 8 | |||
%assign %%repcnt 16/mmsize | |||
%else | |||
%assign %%repcnt 1 | |||
%endif | |||
%rep %%repcnt | |||
%if %1 == 8 | |||
%if ARCH_X86_32 | |||
@@ -146,7 +150,7 @@ cglobal yuv2planeX_%1, %3, 7, %2, filter, fltsize, src, dst, w, dither, offset | |||
mova m1, [yuv2yuvX_%1_start] | |||
mova m2, m1 | |||
%endif ; %1 == 8/9/10/16 | |||
movsx cntr_reg, r1m ; FIXME should be fltsizem, but the assembler does the wrong thing b/c of SUB above | |||
movsx cntr_reg, fltsizem | |||
.filterloop_ %+ %%i: | |||
; input pixels | |||
mov r6, [srcq+gprsize*cntr_reg-2*gprsize] | |||
@@ -226,10 +230,9 @@ cglobal yuv2planeX_%1, %3, 7, %2, filter, fltsize, src, dst, w, dither, offset | |||
add r5, mmsize/2 | |||
sub wd, mmsize/2 | |||
%if %1 == 8 | |||
%assign %%i %%i+2 | |||
%endrep | |||
%endif ; %1 == 8 | |||
jg .pixelloop | |||
%if %1 == 8 | |||
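For reference, the vertical scaler this assembly implements combines filterSize intermediate rows per output pixel, adds the dither value for the current column, shifts and clips; the per-column dither index is why the 8-bit path above cares about its unroll phase. A C model written from that description (the 12/19 fixed-point constants are my assumption of the usual swscale layout, not a verified copy):

    #include <stdint.h>

    static uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* 8-bit yuv2planeX-style loop: a filterSize-tap vertical combination of
     * 16-bit intermediate rows plus an 8-entry dither pattern per column. */
    static void yuv2planeX_8_sketch(const int16_t *filter, int filterSize,
                                    const int16_t **src, uint8_t *dest,
                                    int dstW, const uint8_t *dither, int offset)
    {
        for (int i = 0; i < dstW; i++) {
            int val = dither[(i + offset) & 7] << 12;
            for (int j = 0; j < filterSize; j++)
                val += src[j][i] * filter[j];
            dest[i] = clip_uint8(val >> 19);
        }
    }
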
@@ -223,7 +223,7 @@ if [ -n "$do_xwd" ] ; then | |||
do_image_formats xwd | |||
fi | |||
if [ -n "$do_sun" ] ; then | |||
if [ -n "$do_sunrast" ] ; then | |||
do_image_formats sun | |||
fi | |||
@@ -1,4 +1,4 @@ | |||
26a7f6b0f0b7181df8df3fa589f6bf81 *./tests/data/acodec/wmav1.asf | |||
0260385b8a54df11ad349f9ba8240fd8 *./tests/data/acodec/wmav1.asf | |||
106004 ./tests/data/acodec/wmav1.asf | |||
stddev:12245.52 PSNR: 14.57 MAXDIFF:65521 bytes: 1064960/ 1058400 | |||
stddev: 2095.89 PSNR: 29.90 MAXDIFF:27658 bytes: 1056768/ 1058400 | |||
stddev:12241.90 PSNR: 14.57 MAXDIFF:65521 bytes: 1064960/ 1058400 | |||
stddev: 2074.79 PSNR: 29.99 MAXDIFF:27658 bytes: 1056768/ 1058400 |
@@ -1,4 +1,4 @@ | |||
7c6c0cb692af01b312ae345723674b5f *./tests/data/acodec/wmav2.asf | |||
bdb4c312fb109f990be83a70f8ec9bdc *./tests/data/acodec/wmav2.asf | |||
106044 ./tests/data/acodec/wmav2.asf | |||
stddev:12249.93 PSNR: 14.57 MAXDIFF:65521 bytes: 1064960/ 1058400 | |||
stddev: 2089.21 PSNR: 29.93 MAXDIFF:27650 bytes: 1056768/ 1058400 | |||
stddev:12246.35 PSNR: 14.57 MAXDIFF:65521 bytes: 1064960/ 1058400 | |||
stddev: 2068.08 PSNR: 30.02 MAXDIFF:27650 bytes: 1056768/ 1058400 |
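The stddev/PSNR lines in these reference files are tiny_psnr-style statistics over 16-bit samples; the PSNR value follows directly from the stddev, which is why both move together when the encoder output changes. A quick check of that relationship (assuming a 65535 full-scale value, which matches the figures above):

    #include <math.h>
    #include <stdio.h>

    /* PSNR in dB from a standard deviation, for 16-bit full scale. */
    static double psnr_from_stddev(double stddev)
    {
        return 20.0 * log10(65535.0 / stddev);
    }

    int main(void)
    {
        /* 2074.79 -> ~29.99 dB and 12241.90 -> ~14.57 dB, matching the
         * updated wmav1 reference lines. */
        printf("%.2f %.2f\n", psnr_from_stddev(2074.79),
                              psnr_from_stddev(12241.90));
        return 0;
    }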