
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  8bps: cosmetics
  aasc: cosmetics, reformat
  ansi: remove an extra return
  asvdec: cosmetics, reformat
  aura: cosmetics, reformat

Conflicts:
	libavcodec/aasc.c
	libavcodec/asvdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
tags/n1.1
Michael Niedermayer, 12 years ago
commit 8e09e183fc
4 changed files with 144 additions and 144 deletions:

  1. libavcodec/8bps.c     +0    -25
  2. libavcodec/aasc.c     +7    -7
  3. libavcodec/asvdec.c   +127  -101
  4. libavcodec/aura.c     +10   -11

libavcodec/8bps.c  (+0, -25)

@@ -44,9 +44,6 @@
 static const enum AVPixelFormat pixfmt_rgb24[] = {
     AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE };
 
-/*
- * Decoder context
- */
 typedef struct EightBpsContext {
     AVCodecContext *avctx;
     AVFrame pic;
@@ -57,12 +54,6 @@ typedef struct EightBpsContext {
     uint32_t pal[256];
 } EightBpsContext;
 
-
-/*
- *
- * Decode a frame
- *
- */
 static int decode_frame(AVCodecContext *avctx, void *data,
                         int *got_frame, AVPacket *avpkt)
 {
@@ -151,12 +142,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     return buf_size;
 }
 
-
-/*
- *
- * Init 8BPS decoder
- *
- */
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     EightBpsContext * const c = avctx->priv_data;
@@ -202,14 +187,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     return 0;
 }
 
-
-
-
-/*
- *
- * Uninit 8BPS decoder
- *
- */
 static av_cold int decode_end(AVCodecContext *avctx)
 {
     EightBpsContext * const c = avctx->priv_data;
@@ -220,8 +197,6 @@ static av_cold int decode_end(AVCodecContext *avctx)
     return 0;
 }
 
-
-
 AVCodec ff_eightbps_decoder = {
     .name = "8bps",
     .type = AVMEDIA_TYPE_VIDEO,


libavcodec/aasc.c  (+7, -7)

@@ -79,8 +79,8 @@ static int aasc_decode_frame(AVCodecContext *avctx,
                              AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
-    AascContext *s = avctx->priv_data;
+    int buf_size = avpkt->size;
+    AascContext *s = avctx->priv_data;
     int compr, i, stride, psize;
 
     if (buf_size < 4) {
@@ -95,8 +95,8 @@ static int aasc_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    compr = AV_RL32(buf);
-    buf += 4;
+    compr = AV_RL32(buf);
+    buf += 4;
     buf_size -= 4;
     psize = avctx->bits_per_coded_sample / 8;
     switch (avctx->codec_tag) {
@@ -105,11 +105,11 @@ static int aasc_decode_frame(AVCodecContext *avctx,
         ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
         break;
     case MKTAG('A', 'A', 'S', 'C'):
-        switch(compr){
+        switch (compr) {
         case 0:
             stride = (avctx->width * psize + psize) & ~psize;
-            for(i = avctx->height - 1; i >= 0; i--){
-                if(avctx->width * psize > buf_size){
+            for (i = avctx->height - 1; i >= 0; i--) {
+                if (avctx->width * psize > buf_size) {
                     av_log(avctx, AV_LOG_ERROR, "Next line is beyond buffer bounds\n");
                     break;
                 }


libavcodec/asvdec.c  (+127, -101)

@@ -43,124 +43,148 @@ static VLC dc_ccp_vlc;
 static VLC ac_ccp_vlc;
 static VLC asv2_level_vlc;
 
-static av_cold void init_vlcs(ASV1Context *a){
+static av_cold void init_vlcs(ASV1Context *a)
+{
     static int done = 0;
 
     if (!done) {
         done = 1;
 
         INIT_VLC_STATIC(&ccp_vlc, VLC_BITS, 17,
-                        &ff_asv_ccp_tab[0][1], 2, 1,
-                        &ff_asv_ccp_tab[0][0], 2, 1, 64);
+                        &ff_asv_ccp_tab[0][1], 2, 1,
+                        &ff_asv_ccp_tab[0][0], 2, 1, 64);
         INIT_VLC_STATIC(&dc_ccp_vlc, VLC_BITS, 8,
-                        &ff_asv_dc_ccp_tab[0][1], 2, 1,
-                        &ff_asv_dc_ccp_tab[0][0], 2, 1, 64);
+                        &ff_asv_dc_ccp_tab[0][1], 2, 1,
+                        &ff_asv_dc_ccp_tab[0][0], 2, 1, 64);
         INIT_VLC_STATIC(&ac_ccp_vlc, VLC_BITS, 16,
-                        &ff_asv_ac_ccp_tab[0][1], 2, 1,
-                        &ff_asv_ac_ccp_tab[0][0], 2, 1, 64);
+                        &ff_asv_ac_ccp_tab[0][1], 2, 1,
+                        &ff_asv_ac_ccp_tab[0][0], 2, 1, 64);
         INIT_VLC_STATIC(&level_vlc, VLC_BITS, 7,
-                        &ff_asv_level_tab[0][1], 2, 1,
-                        &ff_asv_level_tab[0][0], 2, 1, 64);
+                        &ff_asv_level_tab[0][1], 2, 1,
+                        &ff_asv_level_tab[0][0], 2, 1, 64);
         INIT_VLC_STATIC(&asv2_level_vlc, ASV2_LEVEL_VLC_BITS, 63,
-                        &ff_asv2_level_tab[0][1], 2, 1,
-                        &ff_asv2_level_tab[0][0], 2, 1, 1024);
+                        &ff_asv2_level_tab[0][1], 2, 1,
+                        &ff_asv2_level_tab[0][0], 2, 1, 1024);
     }
 }
 
 //FIXME write a reversed bitstream reader to avoid the double reverse
-static inline int asv2_get_bits(GetBitContext *gb, int n){
-    return ff_reverse[ get_bits(gb, n) << (8-n) ];
+static inline int asv2_get_bits(GetBitContext *gb, int n)
+{
+    return ff_reverse[get_bits(gb, n) << (8-n)];
 }
 
-static inline int asv1_get_level(GetBitContext *gb){
-    int code= get_vlc2(gb, level_vlc.table, VLC_BITS, 1);
+static inline int asv1_get_level(GetBitContext *gb)
+{
+    int code = get_vlc2(gb, level_vlc.table, VLC_BITS, 1);
 
-    if(code==3) return get_sbits(gb, 8);
-    else return code - 3;
+    if (code == 3)
+        return get_sbits(gb, 8);
+    else
+        return code - 3;
 }
 
-static inline int asv2_get_level(GetBitContext *gb){
-    int code= get_vlc2(gb, asv2_level_vlc.table, ASV2_LEVEL_VLC_BITS, 1);
+static inline int asv2_get_level(GetBitContext *gb)
+{
+    int code = get_vlc2(gb, asv2_level_vlc.table, ASV2_LEVEL_VLC_BITS, 1);
 
-    if(code==31) return (int8_t)asv2_get_bits(gb, 8);
-    else return code - 31;
+    if (code == 31)
+        return (int8_t)asv2_get_bits(gb, 8);
+    else
+        return code - 31;
 }
 
-static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64]){
+static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64])
+{
     int i;
 
-    block[0]= 8*get_bits(&a->gb, 8);
+    block[0] = 8 * get_bits(&a->gb, 8);
 
-    for(i=0; i<11; i++){
-        const int ccp= get_vlc2(&a->gb, ccp_vlc.table, VLC_BITS, 1);
+    for (i = 0; i < 11; i++) {
+        const int ccp = get_vlc2(&a->gb, ccp_vlc.table, VLC_BITS, 1);
 
-        if(ccp){
-            if(ccp == 16) break;
-            if(ccp < 0 || i>=10){
+        if (ccp) {
+            if (ccp == 16)
+                break;
+            if (ccp < 0 || i >= 10) {
                 av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
                 return -1;
             }
 
-            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
-            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
-            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
-            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
+            if (ccp & 8)
+                block[a->scantable.permutated[4 * i + 0]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 0]) >> 4;
+            if (ccp & 4)
+                block[a->scantable.permutated[4 * i + 1]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 1]) >> 4;
+            if (ccp & 2)
+                block[a->scantable.permutated[4 * i + 2]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 2]) >> 4;
+            if (ccp & 1)
+                block[a->scantable.permutated[4 * i + 3]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 3]) >> 4;
         }
     }
 
     return 0;
 }
 
-static inline int asv2_decode_block(ASV1Context *a, DCTELEM block[64]){
+static inline int asv2_decode_block(ASV1Context *a, DCTELEM block[64])
+{
     int i, count, ccp;
 
-    count= asv2_get_bits(&a->gb, 4);
+    count = asv2_get_bits(&a->gb, 4);
 
-    block[0]= 8*asv2_get_bits(&a->gb, 8);
+    block[0] = 8 * asv2_get_bits(&a->gb, 8);
 
-    ccp= get_vlc2(&a->gb, dc_ccp_vlc.table, VLC_BITS, 1);
-    if(ccp){
-        if(ccp&4) block[a->scantable.permutated[1]]= (asv2_get_level(&a->gb) * a->intra_matrix[1])>>4;
-        if(ccp&2) block[a->scantable.permutated[2]]= (asv2_get_level(&a->gb) * a->intra_matrix[2])>>4;
-        if(ccp&1) block[a->scantable.permutated[3]]= (asv2_get_level(&a->gb) * a->intra_matrix[3])>>4;
+    ccp = get_vlc2(&a->gb, dc_ccp_vlc.table, VLC_BITS, 1);
+    if (ccp) {
+        if (ccp & 4)
+            block[a->scantable.permutated[1]] = (asv2_get_level(&a->gb) * a->intra_matrix[1]) >> 4;
+        if (ccp & 2)
+            block[a->scantable.permutated[2]] = (asv2_get_level(&a->gb) * a->intra_matrix[2]) >> 4;
+        if (ccp & 1)
+            block[a->scantable.permutated[3]] = (asv2_get_level(&a->gb) * a->intra_matrix[3]) >> 4;
     }
 
-    for(i=1; i<count+1; i++){
-        const int ccp= get_vlc2(&a->gb, ac_ccp_vlc.table, VLC_BITS, 1);
-
-        if(ccp){
-            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
-            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
-            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
-            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
+    for (i = 1; i < count + 1; i++) {
+        const int ccp = get_vlc2(&a->gb, ac_ccp_vlc.table, VLC_BITS, 1);
+
+        if (ccp) {
+            if (ccp & 8)
+                block[a->scantable.permutated[4*i + 0]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 0]) >> 4;
+            if (ccp & 4)
+                block[a->scantable.permutated[4*i + 1]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 1]) >> 4;
+            if (ccp & 2)
+                block[a->scantable.permutated[4*i + 2]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 2]) >> 4;
+            if (ccp & 1)
+                block[a->scantable.permutated[4*i + 3]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 3]) >> 4;
         }
     }
 
     return 0;
 }
 
-static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){
+static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64])
+{
     int i;
 
     a->dsp.clear_blocks(block[0]);
 
-    if(a->avctx->codec_id == AV_CODEC_ID_ASV1){
-        for(i=0; i<6; i++){
-            if( asv1_decode_block(a, block[i]) < 0)
+    if (a->avctx->codec_id == AV_CODEC_ID_ASV1) {
+        for (i = 0; i < 6; i++) {
+            if (asv1_decode_block(a, block[i]) < 0)
                 return -1;
         }
-    }else{
-        for(i=0; i<6; i++){
-            if( asv2_decode_block(a, block[i]) < 0)
+    } else {
+        for (i = 0; i < 6; i++) {
+            if (asv2_decode_block(a, block[i]) < 0)
                 return -1;
         }
     }
     return 0;
 }
 
-static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
-    DCTELEM (*block)[64]= a->block;
-    int linesize= a->picture.linesize[0];
+static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
+{
+    DCTELEM (*block)[64] = a->block;
+    int linesize = a->picture.linesize[0];
 
     uint8_t *dest_y  = a->picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
     uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
@@ -171,7 +195,7 @@ static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
     a->dsp.idct_put(dest_y + 8*linesize    , linesize, block[2]);
     a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);
 
-    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
+    if (!(a->avctx->flags&CODEC_FLAG_GRAY)) {
         a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
         a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
     }
@@ -181,62 +205,62 @@ static int decode_frame(AVCodecContext *avctx,
                         void *data, int *got_frame,
                         AVPacket *avpkt)
 {
-    const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
     ASV1Context * const a = avctx->priv_data;
-    AVFrame *picture = data;
-    AVFrame * const p= &a->picture;
+    const uint8_t *buf = avpkt->data;
+    int buf_size = avpkt->size;
+    AVFrame *picture = data;
+    AVFrame * const p = &a->picture;
     int mb_x, mb_y;
 
-    if(p->data[0])
+    if (p->data[0])
         avctx->release_buffer(avctx, p);
 
-    p->reference= 0;
-    if(ff_get_buffer(avctx, p) < 0){
+    p->reference = 0;
+    if (ff_get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
-    p->pict_type= AV_PICTURE_TYPE_I;
-    p->key_frame= 1;
+    p->pict_type = AV_PICTURE_TYPE_I;
+    p->key_frame = 1;
 
     av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
                           buf_size);
     if (!a->bitstream_buffer)
         return AVERROR(ENOMEM);
 
-    if(avctx->codec_id == AV_CODEC_ID_ASV1)
+    if (avctx->codec_id == AV_CODEC_ID_ASV1)
         a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
-    else{
+    else {
         int i;
-        for(i=0; i<buf_size; i++)
-            a->bitstream_buffer[i]= ff_reverse[ buf[i] ];
+        for (i = 0; i < buf_size; i++)
+            a->bitstream_buffer[i] = ff_reverse[buf[i]];
     }
 
     init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);
 
-    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
-        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
-            if( decode_mb(a, a->block) <0)
+    for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
+        for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
+            if (decode_mb(a, a->block) < 0)
                 return -1;
 
             idct_put(a, mb_x, mb_y);
         }
     }
 
-    if(a->mb_width2 != a->mb_width){
-        mb_x= a->mb_width2;
-        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
-            if( decode_mb(a, a->block) <0)
+    if (a->mb_width2 != a->mb_width) {
+        mb_x = a->mb_width2;
+        for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
+            if (decode_mb(a, a->block) < 0)
                 return -1;
 
             idct_put(a, mb_x, mb_y);
         }
     }
 
-    if(a->mb_height2 != a->mb_height){
-        mb_y= a->mb_height2;
-        for(mb_x=0; mb_x<a->mb_width; mb_x++){
-            if( decode_mb(a, a->block) <0)
+    if (a->mb_height2 != a->mb_height) {
+        mb_y = a->mb_height2;
+        for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
+            if (decode_mb(a, a->block) < 0)
                 return -1;
 
             idct_put(a, mb_x, mb_y);
@@ -248,50 +272,52 @@ static int decode_frame(AVCodecContext *avctx,
 
     emms_c();
 
-    return (get_bits_count(&a->gb)+31)/32*4;
+    return (get_bits_count(&a->gb) + 31) / 32 * 4;
 }
 
-static av_cold int decode_init(AVCodecContext *avctx){
+static av_cold int decode_init(AVCodecContext *avctx)
+{
     ASV1Context * const a = avctx->priv_data;
-    AVFrame *p= &a->picture;
+    AVFrame *p = &a->picture;
+    const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
     int i;
-    const int scale= avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
 
     ff_asv_common_init(avctx);
     init_vlcs(a);
     ff_init_scantable(a->dsp.idct_permutation, &a->scantable, ff_asv_scantab);
-    avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
-    if(avctx->extradata_size < 1 || (a->inv_qscale= avctx->extradata[0]) == 0){
+    if (avctx->extradata_size < 1 || (a->inv_qscale = avctx->extradata[0]) == 0) {
         av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
-        if(avctx->codec_id == AV_CODEC_ID_ASV1)
-            a->inv_qscale= 6;
+        if (avctx->codec_id == AV_CODEC_ID_ASV1)
+            a->inv_qscale = 6;
         else
-            a->inv_qscale= 10;
+            a->inv_qscale = 10;
     }
 
-    for(i=0; i<64; i++){
+    for (i = 0; i < 64; i++) {
         int index = ff_asv_scantab[i];
 
-        a->intra_matrix[i]= 64*scale*ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
+        a->intra_matrix[i] = 64 * scale * ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
     }
 
-    p->qstride= a->mb_width;
-    p->qscale_table= av_malloc( p->qstride * a->mb_height);
-    p->quality= (32*scale + a->inv_qscale/2)/a->inv_qscale;
-    memset(p->qscale_table, p->quality, p->qstride*a->mb_height);
+    p->qstride = a->mb_width;
+    p->qscale_table = av_malloc(p->qstride * a->mb_height);
+    p->quality = (32 * scale + a->inv_qscale / 2) / a->inv_qscale;
+    memset(p->qscale_table, p->quality, p->qstride * a->mb_height);
 
     return 0;
 }
 
-static av_cold int decode_end(AVCodecContext *avctx){
+static av_cold int decode_end(AVCodecContext *avctx)
+{
     ASV1Context * const a = avctx->priv_data;
 
     av_freep(&a->bitstream_buffer);
     av_freep(&a->picture.qscale_table);
-    a->bitstream_buffer_size=0;
+    a->bitstream_buffer_size = 0;
 
-    if(a->picture.data[0])
+    if (a->picture.data[0])
         avctx->release_buffer(avctx, &a->picture);
 
     return 0;


libavcodec/aura.c  (+10, -11)

@@ -50,8 +50,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *pkt)
 {
-    AuraDecodeContext *s=avctx->priv_data;
-
+    AuraDecodeContext *s = avctx->priv_data;
     uint8_t *Y, *U, *V;
     uint8_t val;
     int x, y;
@@ -69,12 +68,12 @@ static int aura_decode_frame(AVCodecContext *avctx,
     /* pixel data starts 48 bytes in, after 3x16-byte tables */
     buf += 48;
 
-    if(s->frame.data[0])
+    if (s->frame.data[0])
         avctx->release_buffer(avctx, &s->frame);
 
     s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
     s->frame.reference = 0;
-    if(ff_get_buffer(avctx, &s->frame) < 0) {
+    if (ff_get_buffer(avctx, &s->frame) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
@@ -86,23 +85,23 @@ static int aura_decode_frame(AVCodecContext *avctx,
     /* iterate through each line in the height */
     for (y = 0; y < avctx->height; y++) {
         /* reset predictors */
-        val = *buf++;
+        val = *buf++;
         U[0] = val & 0xF0;
         Y[0] = val << 4;
-        val = *buf++;
+        val = *buf++;
         V[0] = val & 0xF0;
         Y[1] = Y[0] + delta_table[val & 0xF];
-        Y += 2; U++; V++;
+        Y += 2; U++; V++;
 
         /* iterate through the remaining pixel groups (4 pixels/group) */
         for (x = 1; x < (avctx->width >> 1); x++) {
-            val = *buf++;
+            val = *buf++;
             U[0] = U[-1] + delta_table[val >> 4];
             Y[0] = Y[-1] + delta_table[val & 0xF];
-            val = *buf++;
+            val = *buf++;
             V[0] = V[-1] + delta_table[val >> 4];
             Y[1] = Y[ 0] + delta_table[val & 0xF];
-            Y += 2; U++; V++;
+            Y += 2; U++; V++;
         }
         Y += s->frame.linesize[0] - avctx->width;
         U += s->frame.linesize[1] - (avctx->width >> 1);
@@ -110,7 +109,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
     }
 
     *got_frame = 1;
-    *(AVFrame*)data= s->frame;
+    *(AVFrame*)data = s->frame;
 
     return pkt->size;
 }

