/*
 * ASUS V1/V2 codec
 * Copyright (c) 2003 Michael Niedermayer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * ASUS V1/V2 codec.
 */
#include "avcodec.h"
#include "libavutil/common.h"
#include "put_bits.h"
#include "dsputil.h"
#include "mpeg12data.h"
//#undef NDEBUG
#include <assert.h> /* assert() is used in asv2_encode_block() */
#define VLC_BITS 6
#define ASV2_LEVEL_VLC_BITS 10
typedef struct ASV1Context{
    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame picture;
    PutBitContext pb;
    GetBitContext gb;
    ScanTable scantable;
    int inv_qscale;
    int mb_width;
    int mb_height;
    int mb_width2;
    int mb_height2;
    DECLARE_ALIGNED(16, DCTELEM, block)[6][64];
    uint16_t intra_matrix[64];
    int q_intra_matrix[64];
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
} ASV1Context;
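
/* Scan order shared by ASV1 and ASV2. Each entry is a raster position
 * (row * 8 + column) inside the 8x8 block; coefficients are visited in
 * groups of four forming 2x2 sub-blocks, which is what the CCP (coded
 * coefficient pattern) codes below describe. */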
static const uint8_t scantab[64]={
    0x00,0x08,0x01,0x09,0x10,0x18,0x11,0x19,
    0x02,0x0A,0x03,0x0B,0x12,0x1A,0x13,0x1B,
    0x04,0x0C,0x05,0x0D,0x20,0x28,0x21,0x29,
    0x06,0x0E,0x07,0x0F,0x14,0x1C,0x15,0x1D,
    0x22,0x2A,0x23,0x2B,0x30,0x38,0x31,0x39,
    0x16,0x1E,0x17,0x1F,0x24,0x2C,0x25,0x2D,
    0x32,0x3A,0x33,0x3B,0x26,0x2E,0x27,0x2F,
    0x34,0x3C,0x35,0x3D,0x36,0x3E,0x37,0x3F,
};
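
/* {code, bit length} pairs for the ASV1 coded-coefficient-pattern VLC.
 * Entry 0 codes a 2x2 group with all four coefficients zero, entry 16 is
 * the end-of-block symbol. */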
static const uint8_t ccp_tab[17][2]={
    {0x2,2}, {0x7,5}, {0xB,5}, {0x3,5},
    {0xD,5}, {0x5,5}, {0x9,5}, {0x1,5},
    {0xE,5}, {0x6,5}, {0xA,5}, {0x2,5},
    {0xC,5}, {0x4,5}, {0x8,5}, {0x3,2},
    {0xF,5}, //EOB
};
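
/* ASV1 level VLC, indexed by level + 3 for levels -3..+3. The code for
 * level 0 (index 3) doubles as the escape prefix for an explicit 8-bit
 * signed level, see asv1_get_level()/asv1_put_level(). */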
static const uint8_t level_tab[7][2]={
    {3,4}, {3,3}, {3,2}, {0,3}, {2,2}, {2,3}, {2,4}
};
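
/* {code, bit length} pairs for the ASV2 coded-coefficient-pattern VLCs:
 * dc_ccp_tab covers the 3-bit pattern of the three AC coefficients sharing
 * the DC group, ac_ccp_tab the 4-bit pattern of the remaining 2x2 groups. */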
static const uint8_t dc_ccp_tab[8][2]={
    {0x1,2}, {0xD,4}, {0xF,4}, {0xC,4},
    {0x5,3}, {0xE,4}, {0x4,3}, {0x0,2},
};

static const uint8_t ac_ccp_tab[16][2]={
    {0x00,2}, {0x3B,6}, {0x0A,4}, {0x3A,6},
    {0x02,3}, {0x39,6}, {0x3C,6}, {0x38,6},
    {0x03,3}, {0x3D,6}, {0x08,4}, {0x1F,5},
    {0x09,4}, {0x0B,4}, {0x0D,4}, {0x0C,4},
};
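
/* ASV2 level VLC, indexed by level + 31 for levels -31..+31. Index 31
 * (level 0) is the escape prefix, followed by the level as an explicit
 * 8-bit value, see asv2_get_level()/asv2_put_level(). */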
static const uint8_t asv2_level_tab[63][2]={
    {0x3F,10},{0x2F,10},{0x37,10},{0x27,10},{0x3B,10},{0x2B,10},{0x33,10},{0x23,10},
    {0x3D,10},{0x2D,10},{0x35,10},{0x25,10},{0x39,10},{0x29,10},{0x31,10},{0x21,10},
    {0x1F, 8},{0x17, 8},{0x1B, 8},{0x13, 8},{0x1D, 8},{0x15, 8},{0x19, 8},{0x11, 8},
    {0x0F, 6},{0x0B, 6},{0x0D, 6},{0x09, 6},
    {0x07, 4},{0x05, 4},
    {0x03, 2},
    {0x00, 5},
    {0x02, 2},
    {0x04, 4},{0x06, 4},
    {0x08, 6},{0x0C, 6},{0x0A, 6},{0x0E, 6},
    {0x10, 8},{0x18, 8},{0x14, 8},{0x1C, 8},{0x12, 8},{0x1A, 8},{0x16, 8},{0x1E, 8},
    {0x20,10},{0x30,10},{0x28,10},{0x38,10},{0x24,10},{0x34,10},{0x2C,10},{0x3C,10},
    {0x22,10},{0x32,10},{0x2A,10},{0x3A,10},{0x26,10},{0x36,10},{0x2E,10},{0x3E,10},
};
static VLC ccp_vlc;
static VLC level_vlc;
static VLC dc_ccp_vlc;
static VLC ac_ccp_vlc;
static VLC asv2_level_vlc;

static av_cold void init_vlcs(ASV1Context *a){
    static int done = 0;

    if (!done) {
        done = 1;

        INIT_VLC_STATIC(&ccp_vlc, VLC_BITS, 17,
                        &ccp_tab[0][1], 2, 1,
                        &ccp_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&dc_ccp_vlc, VLC_BITS, 8,
                        &dc_ccp_tab[0][1], 2, 1,
                        &dc_ccp_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&ac_ccp_vlc, VLC_BITS, 16,
                        &ac_ccp_tab[0][1], 2, 1,
                        &ac_ccp_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&level_vlc, VLC_BITS, 7,
                        &level_tab[0][1], 2, 1,
                        &level_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&asv2_level_vlc, ASV2_LEVEL_VLC_BITS, 63,
                        &asv2_level_tab[0][1], 2, 1,
                        &asv2_level_tab[0][0], 2, 1, 1024);
    }
}
//FIXME write a reversed bitstream reader to avoid the double reverse
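/* ASV2 stores its bitstream least-significant-bit first; av_reverse mirrors
 * up to 8 bits so the regular MSB-first bitstream reader/writer can be
 * reused (hence the "double reverse" mentioned above). */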
static inline int asv2_get_bits(GetBitContext *gb, int n){
    return av_reverse[ get_bits(gb, n) << (8-n) ];
}

static inline void asv2_put_bits(PutBitContext *pb, int n, int v){
    put_bits(pb, n, av_reverse[ v << (8-n) ]);
}
static inline int asv1_get_level(GetBitContext *gb){
    int code= get_vlc2(gb, level_vlc.table, VLC_BITS, 1);

    if(code==3) return get_sbits(gb, 8);
    else        return code - 3;
}

static inline int asv2_get_level(GetBitContext *gb){
    int code= get_vlc2(gb, asv2_level_vlc.table, ASV2_LEVEL_VLC_BITS, 1);

    if(code==31) return (int8_t)asv2_get_bits(gb, 8);
    else         return code - 31;
}

static inline void asv1_put_level(PutBitContext *pb, int level){
    unsigned int index= level + 3;

    if(index <= 6) put_bits(pb, level_tab[index][1], level_tab[index][0]);
    else{
        put_bits(pb, level_tab[3][1], level_tab[3][0]);
        put_sbits(pb, 8, level);
    }
}

static inline void asv2_put_level(PutBitContext *pb, int level){
    unsigned int index= level + 31;

    if(index <= 62) put_bits(pb, asv2_level_tab[index][1], asv2_level_tab[index][0]);
    else{
        put_bits(pb, asv2_level_tab[31][1], asv2_level_tab[31][0]);
        asv2_put_bits(pb, 8, level&0xFF);
    }
}
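
/* ASV1 blocks start with an 8-bit DC value followed by CCP-coded 2x2
 * coefficient groups and are terminated by the EOB symbol; ASV2 blocks
 * instead begin with a 4-bit count of coded groups, followed by the DC
 * value and the group data. */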
static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64]){
    int i;

    block[0]= 8*get_bits(&a->gb, 8);

    for(i=0; i<11; i++){
        const int ccp= get_vlc2(&a->gb, ccp_vlc.table, VLC_BITS, 1);

        if(ccp){
            if(ccp == 16) break;
            if(ccp < 0 || i>=10){
                av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
                return -1;
            }

            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
        }
    }

    return 0;
}

static inline int asv2_decode_block(ASV1Context *a, DCTELEM block[64]){
    int i, count, ccp;

    count= asv2_get_bits(&a->gb, 4);

    block[0]= 8*asv2_get_bits(&a->gb, 8);

    ccp= get_vlc2(&a->gb, dc_ccp_vlc.table, VLC_BITS, 1);
    if(ccp){
        if(ccp&4) block[a->scantable.permutated[1]]= (asv2_get_level(&a->gb) * a->intra_matrix[1])>>4;
        if(ccp&2) block[a->scantable.permutated[2]]= (asv2_get_level(&a->gb) * a->intra_matrix[2])>>4;
        if(ccp&1) block[a->scantable.permutated[3]]= (asv2_get_level(&a->gb) * a->intra_matrix[3])>>4;
    }

    for(i=1; i<count+1; i++){
        const int ccp= get_vlc2(&a->gb, ac_ccp_vlc.table, VLC_BITS, 1);

        if(ccp){
            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
        }
    }

    return 0;
}
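
/* On the encoder side the DC coefficient is quantized with a fixed
 * (x + 32) >> 6, while AC coefficients use the 16.16 fixed-point factors
 * precomputed in q_intra_matrix[] (see encode_init()). In ASV1, all-zero
 * 2x2 groups are deferred via nc_count and only emitted when a later group
 * is coded, so trailing zero groups collapse into the EOB symbol. */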
static inline void asv1_encode_block(ASV1Context *a, DCTELEM block[64]){
    int i;
    int nc_count=0;

    put_bits(&a->pb, 8, (block[0] + 32)>>6);
    block[0]= 0;

    for(i=0; i<10; i++){
        const int index= scantab[4*i];
        int ccp=0;

        if( (block[index + 0] = (block[index + 0]*a->q_intra_matrix[index + 0] + (1<<15))>>16) ) ccp |= 8;
        if( (block[index + 8] = (block[index + 8]*a->q_intra_matrix[index + 8] + (1<<15))>>16) ) ccp |= 4;
        if( (block[index + 1] = (block[index + 1]*a->q_intra_matrix[index + 1] + (1<<15))>>16) ) ccp |= 2;
        if( (block[index + 9] = (block[index + 9]*a->q_intra_matrix[index + 9] + (1<<15))>>16) ) ccp |= 1;

        if(ccp){
            for(;nc_count; nc_count--)
                put_bits(&a->pb, ccp_tab[0][1], ccp_tab[0][0]);

            put_bits(&a->pb, ccp_tab[ccp][1], ccp_tab[ccp][0]);

            if(ccp&8) asv1_put_level(&a->pb, block[index + 0]);
            if(ccp&4) asv1_put_level(&a->pb, block[index + 8]);
            if(ccp&2) asv1_put_level(&a->pb, block[index + 1]);
            if(ccp&1) asv1_put_level(&a->pb, block[index + 9]);
        }else{
            nc_count++;
        }
    }
    put_bits(&a->pb, ccp_tab[16][1], ccp_tab[16][0]);
}

static inline void asv2_encode_block(ASV1Context *a, DCTELEM block[64]){
    int i;
    int count=0;

    for(count=63; count>3; count--){
        const int index= scantab[count];

        if( (block[index]*a->q_intra_matrix[index] + (1<<15))>>16 )
            break;
    }

    count >>= 2;

    asv2_put_bits(&a->pb, 4, count);
    asv2_put_bits(&a->pb, 8, (block[0] + 32)>>6);
    block[0]= 0;

    for(i=0; i<=count; i++){
        const int index= scantab[4*i];
        int ccp=0;

        if( (block[index + 0] = (block[index + 0]*a->q_intra_matrix[index + 0] + (1<<15))>>16) ) ccp |= 8;
        if( (block[index + 8] = (block[index + 8]*a->q_intra_matrix[index + 8] + (1<<15))>>16) ) ccp |= 4;
        if( (block[index + 1] = (block[index + 1]*a->q_intra_matrix[index + 1] + (1<<15))>>16) ) ccp |= 2;
        if( (block[index + 9] = (block[index + 9]*a->q_intra_matrix[index + 9] + (1<<15))>>16) ) ccp |= 1;

        assert(i || ccp<8);
        if(i) put_bits(&a->pb, ac_ccp_tab[ccp][1], ac_ccp_tab[ccp][0]);
        else  put_bits(&a->pb, dc_ccp_tab[ccp][1], dc_ccp_tab[ccp][0]);

        if(ccp){
            if(ccp&8) asv2_put_level(&a->pb, block[index + 0]);
            if(ccp&4) asv2_put_level(&a->pb, block[index + 8]);
            if(ccp&2) asv2_put_level(&a->pb, block[index + 1]);
            if(ccp&1) asv2_put_level(&a->pb, block[index + 9]);
        }
    }
}
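
/* A macroblock covers 16x16 luma and 8x8 of each chroma plane (YUV 4:2:0),
 * i.e. six 8x8 DCT blocks: four luma followed by Cb and Cr. */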
static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){
    int i;

    a->dsp.clear_blocks(block[0]);

    if(a->avctx->codec_id == CODEC_ID_ASV1){
        for(i=0; i<6; i++){
            if( asv1_decode_block(a, block[i]) < 0)
                return -1;
        }
    }else{
        for(i=0; i<6; i++){
            if( asv2_decode_block(a, block[i]) < 0)
                return -1;
        }
    }
    return 0;
}
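
/* Worst-case coded size of one macroblock in bytes: 16*16*3/2 = 384 samples,
 * budgeted at up to 30 bits each. Used both to size the output packet in
 * encode_frame() and to detect overflow before writing each macroblock. */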
#define MAX_MB_SIZE (30*16*16*3/2/8)

static inline int encode_mb(ASV1Context *a, DCTELEM block[6][64]){
    int i;

    if (a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < MAX_MB_SIZE) {
        av_log(a->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    if(a->avctx->codec_id == CODEC_ID_ASV1){
        for(i=0; i<6; i++)
            asv1_encode_block(a, block[i]);
    }else{
        for(i=0; i<6; i++)
            asv2_encode_block(a, block[i]);
    }
    return 0;
}
static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];

    uint8_t *dest_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.idct_put(dest_y                 , linesize, block[0]);
    a->dsp.idct_put(dest_y              + 8, linesize, block[1]);
    a->dsp.idct_put(dest_y + 8*linesize    , linesize, block[2]);
    a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
        a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
    }
}

static inline void dct_get(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];
    int i;

    uint8_t *ptr_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *ptr_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *ptr_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.get_pixels(block[0], ptr_y                 , linesize);
    a->dsp.get_pixels(block[1], ptr_y              + 8, linesize);
    a->dsp.get_pixels(block[2], ptr_y + 8*linesize    , linesize);
    a->dsp.get_pixels(block[3], ptr_y + 8*linesize + 8, linesize);
    for(i=0; i<4; i++)
        a->dsp.fdct(block[i]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.get_pixels(block[4], ptr_cb, a->picture.linesize[1]);
        a->dsp.get_pixels(block[5], ptr_cr, a->picture.linesize[2]);
        for(i=4; i<6; i++)
            a->dsp.fdct(block[i]);
    }
}
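
/* ASV1 frames are stored as byte-swapped 32-bit words, ASV2 frames with every
 * byte bit-reversed; both are converted into bitstream_buffer before parsing.
 * Macroblocks fully inside the picture are decoded first, then the partial
 * right column and bottom row, if any. */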
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ASV1Context * const a = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= &a->picture;
    int mb_x, mb_y;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
                          buf_size);
    if (!a->bitstream_buffer)
        return AVERROR(ENOMEM);

    if(avctx->codec_id == CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
    else{
        int i;
        for(i=0; i<buf_size; i++)
            a->bitstream_buffer[i]= av_reverse[ buf[i] ];
    }

    init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    *picture   = a->picture;
    *data_size = sizeof(AVPicture);

    emms_c();

    return (get_bits_count(&a->gb)+31)/32*4;
}
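
/* The encoder writes the same layout: full macroblocks first, then the
 * partial right column and bottom row. The output is padded to a multiple
 * of 32 bits and finally byte-swapped (ASV1) or bit-reversed (ASV2) to
 * match what decode_frame() expects. */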
#if CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    ASV1Context * const a = avctx->priv_data;
    AVFrame * const p= &a->picture;
    int size, ret;
    int mb_x, mb_y;

    if (!pkt->data &&
        (ret = av_new_packet(pkt, a->mb_height*a->mb_width*MAX_MB_SIZE +
                                  FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    init_put_bits(&a->pb, pkt->data, pkt->size);

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }
    emms_c();

    avpriv_align_put_bits(&a->pb);
    while(put_bits_count(&a->pb)&31)
        put_bits(&a->pb, 8, 0);

    size= put_bits_count(&a->pb)/32;

    if(avctx->codec_id == CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    else{
        int i;
        for(i=0; i<4*size; i++)
            pkt->data[i] = av_reverse[pkt->data[i]];
    }

    pkt->size   = size*4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
#endif /* CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER */
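
/* mb_width/mb_height are rounded up to whole macroblocks; mb_width2/mb_height2
 * count only macroblocks lying entirely inside the picture, so the edge
 * column/row can be handled separately. */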
static av_cold void common_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    ff_dsputil_init(&a->dsp, avctx);

    a->mb_width   = (avctx->width  + 15) / 16;
    a->mb_height  = (avctx->height + 15) / 16;
    a->mb_width2  = (avctx->width  + 0) / 16;
    a->mb_height2 = (avctx->height + 0) / 16;

    avctx->coded_frame= &a->picture;
    a->avctx= avctx;
}
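
/* The inverse quantizer scale is taken from the first extradata byte; both
 * codecs reuse the MPEG-1 default intra matrix, with ASV2 using twice the
 * ASV1 scale. */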
static av_cold int decode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    AVFrame *p= &a->picture;
    int i;
    const int scale= avctx->codec_id == CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);
    init_vlcs(a);
    ff_init_scantable(a->dsp.idct_permutation, &a->scantable, scantab);
    avctx->pix_fmt= PIX_FMT_YUV420P;

    a->inv_qscale= avctx->extradata[0];
    if(a->inv_qscale == 0){
        av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
        if(avctx->codec_id == CODEC_ID_ASV1)
            a->inv_qscale= 6;
        else
            a->inv_qscale= 10;
    }

    for(i=0; i<64; i++){
        int index= scantab[i];

        a->intra_matrix[i]= 64*scale*ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
    }

    p->qstride= a->mb_width;
    p->qscale_table= av_malloc( p->qstride * a->mb_height);
    p->quality= (32*scale + a->inv_qscale/2)/a->inv_qscale;
    memset(p->qscale_table, p->quality, p->qstride*a->mb_height);

    return 0;
}
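
/* encode_init() derives inv_qscale from global_quality and stores it,
 * little-endian, in the first 4 extradata bytes, followed by the "ASUS" tag.
 * q_intra_matrix[] holds ((inv_qscale << 16) + q/2) / q, i.e. the per-
 * coefficient quantization factors in 16.16 fixed point. */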
#if CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER
static av_cold int encode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    int i;
    const int scale= avctx->codec_id == CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);

    if(avctx->global_quality == 0) avctx->global_quality= 4*FF_QUALITY_SCALE;

    a->inv_qscale= (32*scale*FF_QUALITY_SCALE + avctx->global_quality/2) / avctx->global_quality;

    avctx->extradata= av_mallocz(8);
    avctx->extradata_size=8;
    ((uint32_t*)avctx->extradata)[0]= av_le2ne32(a->inv_qscale);
    ((uint32_t*)avctx->extradata)[1]= av_le2ne32(AV_RL32("ASUS"));

    for(i=0; i<64; i++){
        int q= 32*scale*ff_mpeg1_default_intra_matrix[i];
        a->q_intra_matrix[i]= ((a->inv_qscale<<16) + q/2) / q;
    }

    return 0;
}
#endif /* CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER */
static av_cold int decode_end(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    av_freep(&a->bitstream_buffer);
    av_freep(&a->picture.qscale_table);
    a->bitstream_buffer_size=0;

    if(a->picture.data[0])
        avctx->release_buffer(avctx, &a->picture);

    return 0;
}
AVCodec ff_asv1_decoder = {
    .name           = "asv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_ASV1,
    .priv_data_size = sizeof(ASV1Context),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V1"),
};

AVCodec ff_asv2_decoder = {
    .name           = "asv2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_ASV2,
    .priv_data_size = sizeof(ASV1Context),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V2"),
};

#if CONFIG_ASV1_ENCODER
AVCodec ff_asv1_encoder = {
    .name           = "asv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_ASV1,
    .priv_data_size = sizeof(ASV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V1"),
};
#endif

#if CONFIG_ASV2_ENCODER
AVCodec ff_asv2_encoder = {
    .name           = "asv2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_ASV2,
    .priv_data_size = sizeof(ASV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V2"),
};
#endif