/*
 * ASUS V1/V2 codec
 * Copyright (c) 2003 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file asv1.c
 * ASUS V1/V2 codec.
 */

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

//#undef NDEBUG
#include <assert.h> /* assert() is used in asv2_encode_block() */
#define VLC_BITS 6
#define ASV2_LEVEL_VLC_BITS 10
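
/**
 * Shared codec context for the ASV1/ASV2 decoder and encoder.
 * intra_matrix holds the dequantization factors used by the decoder;
 * q_intra_matrix holds their 16.16 fixed-point reciprocals used by the encoder.
 */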
typedef struct ASV1Context{
    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame picture;
    PutBitContext pb;
    GetBitContext gb;

    ScanTable scantable;

    int inv_qscale;

    int mb_width;
    int mb_height;
    int mb_width2;
    int mb_height2;

    DECLARE_ALIGNED_8(DCTELEM, block[6][64]);
    DECLARE_ALIGNED_8(uint16_t, intra_matrix[64]);
    DECLARE_ALIGNED_8(int, q_intra_matrix[64]);
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
} ASV1Context;
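
/**
 * Coefficient scan order used by both codec variants: each entry is the raw
 * index (0..63, written in hex) of the coefficient inside the 8x8 block for
 * the corresponding scan position.
 */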
static const uint8_t scantab[64]={
    0x00,0x08,0x01,0x09,0x10,0x18,0x11,0x19,
    0x02,0x0A,0x03,0x0B,0x12,0x1A,0x13,0x1B,
    0x04,0x0C,0x05,0x0D,0x20,0x28,0x21,0x29,
    0x06,0x0E,0x07,0x0F,0x14,0x1C,0x15,0x1D,
    0x22,0x2A,0x23,0x2B,0x30,0x38,0x31,0x39,
    0x16,0x1E,0x17,0x1F,0x24,0x2C,0x25,0x2D,
    0x32,0x3A,0x33,0x3B,0x26,0x2E,0x27,0x2F,
    0x34,0x3C,0x35,0x3D,0x36,0x3E,0x37,0x3F,
};
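
/*
 * {code, bit length} pairs for the variable-length codes built below:
 *   ccp_tab        - ASV1 coded-coefficient pattern for a group of four
 *                    coefficients; the last entry is the end-of-block code
 *   level_tab      - small ASV1 levels (-3..+3), index 3 is the escape code
 *   dc_ccp_tab /
 *   ac_ccp_tab     - ASV2 coefficient patterns for the first group and the
 *                    following groups, respectively
 *   asv2_level_tab - ASV2 levels (-31..+31), index 31 is the escape code
 */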
static const uint8_t ccp_tab[17][2]={
    {0x2,2}, {0x7,5}, {0xB,5}, {0x3,5},
    {0xD,5}, {0x5,5}, {0x9,5}, {0x1,5},
    {0xE,5}, {0x6,5}, {0xA,5}, {0x2,5},
    {0xC,5}, {0x4,5}, {0x8,5}, {0x3,2},
    {0xF,5}, //EOB
};

static const uint8_t level_tab[7][2]={
    {3,4}, {3,3}, {3,2}, {0,3}, {2,2}, {2,3}, {2,4}
};

static const uint8_t dc_ccp_tab[8][2]={
    {0x1,2}, {0xD,4}, {0xF,4}, {0xC,4},
    {0x5,3}, {0xE,4}, {0x4,3}, {0x0,2},
};

static const uint8_t ac_ccp_tab[16][2]={
    {0x00,2}, {0x3B,6}, {0x0A,4}, {0x3A,6},
    {0x02,3}, {0x39,6}, {0x3C,6}, {0x38,6},
    {0x03,3}, {0x3D,6}, {0x08,4}, {0x1F,5},
    {0x09,4}, {0x0B,4}, {0x0D,4}, {0x0C,4},
};

static const uint8_t asv2_level_tab[63][2]={
    {0x3F,10},{0x2F,10},{0x37,10},{0x27,10},{0x3B,10},{0x2B,10},{0x33,10},{0x23,10},
    {0x3D,10},{0x2D,10},{0x35,10},{0x25,10},{0x39,10},{0x29,10},{0x31,10},{0x21,10},
    {0x1F, 8},{0x17, 8},{0x1B, 8},{0x13, 8},{0x1D, 8},{0x15, 8},{0x19, 8},{0x11, 8},
    {0x0F, 6},{0x0B, 6},{0x0D, 6},{0x09, 6},
    {0x07, 4},{0x05, 4},
    {0x03, 2},
    {0x00, 5},
    {0x02, 2},
    {0x04, 4},{0x06, 4},
    {0x08, 6},{0x0C, 6},{0x0A, 6},{0x0E, 6},
    {0x10, 8},{0x18, 8},{0x14, 8},{0x1C, 8},{0x12, 8},{0x1A, 8},{0x16, 8},{0x1E, 8},
    {0x20,10},{0x30,10},{0x28,10},{0x38,10},{0x24,10},{0x34,10},{0x2C,10},{0x3C,10},
    {0x22,10},{0x32,10},{0x2A,10},{0x3A,10},{0x26,10},{0x36,10},{0x2E,10},{0x3E,10},
};

static VLC ccp_vlc;
static VLC level_vlc;
static VLC dc_ccp_vlc;
static VLC ac_ccp_vlc;
static VLC asv2_level_vlc;
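
/* Build the static VLC lookup tables from the {code, length} pairs above;
 * this is done once, on the first codec init. */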
static void init_vlcs(ASV1Context *a){
    static int done = 0;

    if (!done) {
        done = 1;

        init_vlc(&ccp_vlc, VLC_BITS, 17,
                 &ccp_tab[0][1], 2, 1,
                 &ccp_tab[0][0], 2, 1, 1);
        init_vlc(&dc_ccp_vlc, VLC_BITS, 8,
                 &dc_ccp_tab[0][1], 2, 1,
                 &dc_ccp_tab[0][0], 2, 1, 1);
        init_vlc(&ac_ccp_vlc, VLC_BITS, 16,
                 &ac_ccp_tab[0][1], 2, 1,
                 &ac_ccp_tab[0][0], 2, 1, 1);
        init_vlc(&level_vlc, VLC_BITS, 7,
                 &level_tab[0][1], 2, 1,
                 &level_tab[0][0], 2, 1, 1);
        init_vlc(&asv2_level_vlc, ASV2_LEVEL_VLC_BITS, 63,
                 &asv2_level_tab[0][1], 2, 1,
                 &asv2_level_tab[0][0], 2, 1, 1);
    }
}
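
/*
 * The ASV2 bitstream appears to be stored least-significant-bit first. Rather
 * than using a dedicated LSB-first reader, the code bit-reverses every byte of
 * the input (see decode_frame()) and additionally reverses fixed-size fields
 * with ff_reverse when reading or writing them.
 */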
//FIXME write a reversed bitstream reader to avoid the double reverse
static inline int asv2_get_bits(GetBitContext *gb, int n){
    return ff_reverse[ get_bits(gb, n) << (8-n) ];
}

static inline void asv2_put_bits(PutBitContext *pb, int n, int v){
    put_bits(pb, n, ff_reverse[ v << (8-n) ]);
}
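
/* Decode a coefficient level; the VLC covers -3..3 (ASV1) or -31..31 (ASV2),
 * and the escape code is followed by a raw signed 8-bit value. */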
static inline int asv1_get_level(GetBitContext *gb){
    int code= get_vlc2(gb, level_vlc.table, VLC_BITS, 1);

    if(code==3) return get_sbits(gb, 8);
    else        return code - 3;
}

static inline int asv2_get_level(GetBitContext *gb){
    int code= get_vlc2(gb, asv2_level_vlc.table, ASV2_LEVEL_VLC_BITS, 1);

    if(code==31) return (int8_t)asv2_get_bits(gb, 8);
    else         return code - 31;
}

static inline void asv1_put_level(PutBitContext *pb, int level){
    unsigned int index= level + 3;

    if(index <= 6) put_bits(pb, level_tab[index][1], level_tab[index][0]);
    else{
        put_bits(pb, level_tab[3][1], level_tab[3][0]);
        put_bits(pb, 8, level&0xFF);
    }
}

static inline void asv2_put_level(PutBitContext *pb, int level){
    unsigned int index= level + 31;

    if(index <= 62) put_bits(pb, asv2_level_tab[index][1], asv2_level_tab[index][0]);
    else{
        put_bits(pb, asv2_level_tab[31][1], asv2_level_tab[31][0]);
        asv2_put_bits(pb, 8, level&0xFF);
    }
}
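
/*
 * Decode one 8x8 block in ASV1 syntax: an 8-bit DC value scaled by 8, followed
 * by up to ten groups of four coefficients (the first 40 scan positions). Each
 * group starts with a coded-coefficient pattern (ccp) telling which of the
 * four positions carry a level; 16 is the end-of-block code. Levels are
 * dequantized with intra_matrix.
 */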
static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64]){
    int i;

    block[0]= 8*get_bits(&a->gb, 8);

    for(i=0; i<11; i++){
        const int ccp= get_vlc2(&a->gb, ccp_vlc.table, VLC_BITS, 1);

        if(ccp){
            if(ccp == 16) break;
            if(ccp < 0 || i>=10){
                av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
                return -1;
            }

            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
        }
    }

    return 0;
}

static inline int asv2_decode_block(ASV1Context *a, DCTELEM block[64]){
    int i, count, ccp;

    count= asv2_get_bits(&a->gb, 4);

    block[0]= 8*asv2_get_bits(&a->gb, 8);

    ccp= get_vlc2(&a->gb, dc_ccp_vlc.table, VLC_BITS, 1);
    if(ccp){
        if(ccp&4) block[a->scantable.permutated[1]]= (asv2_get_level(&a->gb) * a->intra_matrix[1])>>4;
        if(ccp&2) block[a->scantable.permutated[2]]= (asv2_get_level(&a->gb) * a->intra_matrix[2])>>4;
        if(ccp&1) block[a->scantable.permutated[3]]= (asv2_get_level(&a->gb) * a->intra_matrix[3])>>4;
    }

    for(i=1; i<count+1; i++){
        const int ccp= get_vlc2(&a->gb, ac_ccp_vlc.table, VLC_BITS, 1);

        if(ccp){
            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
        }
    }

    return 0;
}
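
/*
 * Encode one 8x8 block in ASV1 syntax. Coefficients are quantized with the
 * 16.16 fixed-point factors in q_intra_matrix; groups that end up all zero are
 * counted and their ccp=0 codes are only emitted once a later non-zero group
 * follows, and the block is terminated with the end-of-block code.
 */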
static inline void asv1_encode_block(ASV1Context *a, DCTELEM block[64]){
    int i;
    int nc_count=0;

    put_bits(&a->pb, 8, (block[0] + 32)>>6);
    block[0]= 0;

    for(i=0; i<10; i++){
        const int index= scantab[4*i];
        int ccp=0;

        if( (block[index + 0] = (block[index + 0]*a->q_intra_matrix[index + 0] + (1<<15))>>16) ) ccp |= 8;
        if( (block[index + 8] = (block[index + 8]*a->q_intra_matrix[index + 8] + (1<<15))>>16) ) ccp |= 4;
        if( (block[index + 1] = (block[index + 1]*a->q_intra_matrix[index + 1] + (1<<15))>>16) ) ccp |= 2;
        if( (block[index + 9] = (block[index + 9]*a->q_intra_matrix[index + 9] + (1<<15))>>16) ) ccp |= 1;

        if(ccp){
            for(;nc_count; nc_count--)
                put_bits(&a->pb, ccp_tab[0][1], ccp_tab[0][0]);

            put_bits(&a->pb, ccp_tab[ccp][1], ccp_tab[ccp][0]);

            if(ccp&8) asv1_put_level(&a->pb, block[index + 0]);
            if(ccp&4) asv1_put_level(&a->pb, block[index + 8]);
            if(ccp&2) asv1_put_level(&a->pb, block[index + 1]);
            if(ccp&1) asv1_put_level(&a->pb, block[index + 9]);
        }else{
            nc_count++;
        }
    }
    put_bits(&a->pb, ccp_tab[16][1], ccp_tab[16][0]);
}

static inline void asv2_encode_block(ASV1Context *a, DCTELEM block[64]){
    int i;
    int count=0;

    for(count=63; count>3; count--){
        const int index= scantab[count];

        if( (block[index]*a->q_intra_matrix[index] + (1<<15))>>16 )
            break;
    }

    count >>= 2;

    asv2_put_bits(&a->pb, 4, count);
    asv2_put_bits(&a->pb, 8, (block[0] + 32)>>6);
    block[0]= 0;

    for(i=0; i<=count; i++){
        const int index= scantab[4*i];
        int ccp=0;

        if( (block[index + 0] = (block[index + 0]*a->q_intra_matrix[index + 0] + (1<<15))>>16) ) ccp |= 8;
        if( (block[index + 8] = (block[index + 8]*a->q_intra_matrix[index + 8] + (1<<15))>>16) ) ccp |= 4;
        if( (block[index + 1] = (block[index + 1]*a->q_intra_matrix[index + 1] + (1<<15))>>16) ) ccp |= 2;
        if( (block[index + 9] = (block[index + 9]*a->q_intra_matrix[index + 9] + (1<<15))>>16) ) ccp |= 1;

        assert(i || ccp<8);

        if(i) put_bits(&a->pb, ac_ccp_tab[ccp][1], ac_ccp_tab[ccp][0]);
        else  put_bits(&a->pb, dc_ccp_tab[ccp][1], dc_ccp_tab[ccp][0]);

        if(ccp){
            if(ccp&8) asv2_put_level(&a->pb, block[index + 0]);
            if(ccp&4) asv2_put_level(&a->pb, block[index + 8]);
            if(ccp&2) asv2_put_level(&a->pb, block[index + 1]);
            if(ccp&1) asv2_put_level(&a->pb, block[index + 9]);
        }
    }
}
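
/* Decode/encode one 16x16 macroblock: four luma blocks followed by Cb and Cr
 * (4:2:0 sampling). encode_mb() also checks that enough output space is left
 * for a worst-case macroblock. */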
static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){
    int i;

    a->dsp.clear_blocks(block[0]);

    if(a->avctx->codec_id == CODEC_ID_ASV1){
        for(i=0; i<6; i++){
            if( asv1_decode_block(a, block[i]) < 0)
                return -1;
        }
    }else{
        for(i=0; i<6; i++){
            if( asv2_decode_block(a, block[i]) < 0)
                return -1;
        }
    }
    return 0;
}

static inline int encode_mb(ASV1Context *a, DCTELEM block[6][64]){
    int i;

    if(a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < 30*16*16*3/2/8){
        av_log(a->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    if(a->avctx->codec_id == CODEC_ID_ASV1){
        for(i=0; i<6; i++)
            asv1_encode_block(a, block[i]);
    }else{
        for(i=0; i<6; i++)
            asv2_encode_block(a, block[i]);
    }
    return 0;
}
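
/* Inverse-transform the six blocks of a macroblock and write the result into
 * the output picture; chroma is skipped when CODEC_FLAG_GRAY is set. */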
static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];

    uint8_t *dest_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.idct_put(dest_y                 , linesize, block[0]);
    a->dsp.idct_put(dest_y              + 8, linesize, block[1]);
    a->dsp.idct_put(dest_y + 8*linesize    , linesize, block[2]);
    a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
        a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
    }
}
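
/* Fetch the six 8x8 blocks of a macroblock from the input picture and apply
 * the forward DCT to each of them. */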
static inline void dct_get(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];
    int i;

    uint8_t *ptr_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *ptr_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *ptr_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.get_pixels(block[0], ptr_y                 , linesize);
    a->dsp.get_pixels(block[1], ptr_y              + 8, linesize);
    a->dsp.get_pixels(block[2], ptr_y + 8*linesize    , linesize);
    a->dsp.get_pixels(block[3], ptr_y + 8*linesize + 8, linesize);
    for(i=0; i<4; i++)
        a->dsp.fdct(block[i]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.get_pixels(block[4], ptr_cb, a->picture.linesize[1]);
        a->dsp.get_pixels(block[5], ptr_cr, a->picture.linesize[2]);
        for(i=4; i<6; i++)
            a->dsp.fdct(block[i]);
    }
}
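
/*
 * Decode one frame. The input is first converted to the reader's bit order:
 * ASV1 data is byte-swapped in 32-bit words, ASV2 data is bit-reversed per
 * byte. All complete macroblocks are decoded first, then the partial right
 * column and bottom row if the frame size is not a multiple of 16.
 */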
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        uint8_t *buf, int buf_size)
{
    ASV1Context * const a = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&a->picture;
    int mb_x, mb_y;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= I_TYPE;
    p->key_frame= 1;

    a->bitstream_buffer= av_fast_realloc(a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

    if(avctx->codec_id == CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (uint32_t*)buf, buf_size/4);
    else{
        int i;
        for(i=0; i<buf_size; i++)
            a->bitstream_buffer[i]= ff_reverse[ buf[i] ];
    }

    init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

#if 0
    int i;
    printf("%d %d\n", 8*buf_size, get_bits_count(&a->gb));
    for(i=get_bits_count(&a->gb); i<8*buf_size; i++){
        printf("%d", get_bits1(&a->gb));
    }

    for(i=0; i<s->avctx->extradata_size; i++){
        printf("%c\n", ((uint8_t*)s->avctx->extradata)[i]);
    }
#endif

    *picture= *(AVFrame*)&a->picture;
    *data_size = sizeof(AVPicture);

    emms_c();

    return (get_bits_count(&a->gb)+31)/32*4;
}

#ifdef CONFIG_ENCODERS
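/* Encode one frame; the macroblock loops mirror decode_frame(), and the
 * finished bitstream is converted back to the container's byte/bit order. */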
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    ASV1Context * const a = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&a->picture;
    int size;
    int mb_x, mb_y;

    init_put_bits(&a->pb, buf, buf_size);

    *p = *pict;
    p->pict_type= I_TYPE;
    p->key_frame= 1;

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }
    emms_c();

    align_put_bits(&a->pb);
    while(put_bits_count(&a->pb)&31)
        put_bits(&a->pb, 8, 0);

    size= put_bits_count(&a->pb)/32;

    if(avctx->codec_id == CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    else{
        int i;
        for(i=0; i<4*size; i++)
            buf[i]= ff_reverse[ buf[i] ];
    }

    return size*4;
}
#endif /* CONFIG_ENCODERS */
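
/* mb_width/mb_height count all (possibly partial) macroblocks,
 * mb_width2/mb_height2 only the complete ones. */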
static void common_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    dsputil_init(&a->dsp, avctx);

    a->mb_width   = (avctx->width  + 15) / 16;
    a->mb_height  = (avctx->height + 15) / 16;
    a->mb_width2  = (avctx->width  + 0) / 16;
    a->mb_height2 = (avctx->height + 0) / 16;

    avctx->coded_frame= (AVFrame*)&a->picture;
    a->avctx= avctx;
}
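
/*
 * The first extradata byte holds the inverse quantizer scale; the
 * dequantization matrix is derived from the MPEG-1 default intra matrix,
 * reordered by the codec's scan table.
 */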
static int decode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    AVFrame *p= (AVFrame*)&a->picture;
    int i;
    const int scale= avctx->codec_id == CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);
    init_vlcs(a);
    ff_init_scantable(a->dsp.idct_permutation, &a->scantable, scantab);
    avctx->pix_fmt= PIX_FMT_YUV420P;

    a->inv_qscale= ((uint8_t*)avctx->extradata)[0];
    if(a->inv_qscale == 0){
        av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
        if(avctx->codec_id == CODEC_ID_ASV1)
            a->inv_qscale= 6;
        else
            a->inv_qscale= 10;
    }

    for(i=0; i<64; i++){
        int index= scantab[i];

        a->intra_matrix[i]= 64*scale*ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
    }

    p->qstride= a->mb_width;
    p->qscale_table= av_malloc( p->qstride * a->mb_height);
    p->quality= (32*scale + a->inv_qscale/2)/a->inv_qscale;
    memset(p->qscale_table, p->quality, p->qstride*a->mb_height);

    return 0;
}

#ifdef CONFIG_ENCODERS
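/*
 * Map the requested global_quality to the codec's inverse quantizer scale and
 * store it, together with the "ASUS" tag, in 8 bytes of little-endian
 * extradata. q_intra_matrix is filled with 16.16 fixed-point reciprocals of
 * the quantization steps.
 */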
static int encode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    int i;
    const int scale= avctx->codec_id == CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);

    if(avctx->global_quality == 0) avctx->global_quality= 4*FF_QUALITY_SCALE;

    a->inv_qscale= (32*scale*FF_QUALITY_SCALE + avctx->global_quality/2) / avctx->global_quality;

    avctx->extradata= av_mallocz(8);
    avctx->extradata_size=8;
    ((uint32_t*)avctx->extradata)[0]= le2me_32(a->inv_qscale);
    ((uint32_t*)avctx->extradata)[1]= le2me_32(ff_get_fourcc("ASUS"));

    for(i=0; i<64; i++){
        int q= 32*scale*ff_mpeg1_default_intra_matrix[i];
        a->q_intra_matrix[i]= ((a->inv_qscale<<16) + q/2) / q;
    }

    return 0;
}
#endif

static int decode_end(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    av_freep(&a->bitstream_buffer);
    av_freep(&a->picture.qscale_table);
    a->bitstream_buffer_size=0;

    return 0;
}

AVCodec asv1_decoder = {
    "asv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV1,
    sizeof(ASV1Context),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1,
};

AVCodec asv2_decoder = {
    "asv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV2,
    sizeof(ASV1Context),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1,
};

#ifdef CONFIG_ENCODERS
AVCodec asv1_encoder = {
    "asv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV1,
    sizeof(ASV1Context),
    encode_init,
    encode_frame,
    //encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};

AVCodec asv2_encoder = {
    "asv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_ASV2,
    sizeof(ASV1Context),
    encode_init,
    encode_frame,
    //encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif //CONFIG_ENCODERS