/*
 * ASUS V1/V2 codec
 * Copyright (c) 2003 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * ASUS V1/V2 codec.
 */

#include "avcodec.h"
#include "internal.h"
#include "libavutil/avassert.h" /* for av_assert2() */
#include "libavutil/common.h"
#include "put_bits.h"
#include "dsputil.h"
#include "mpeg12data.h"

#define VLC_BITS            6
#define ASV2_LEVEL_VLC_BITS 10
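
/* Codec private context, shared by the ASV1/ASV2 decoders and encoders. */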
typedef struct ASV1Context{
    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame picture;
    PutBitContext pb;
    GetBitContext gb;
    ScanTable scantable;
    int inv_qscale;
    int mb_width;
    int mb_height;
    int mb_width2;
    int mb_height2;
    DECLARE_ALIGNED(16, DCTELEM, block)[6][64];
    uint16_t intra_matrix[64];
    int q_intra_matrix[64];
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
} ASV1Context;
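
/*
 * Coefficient scan order used by both codecs: the 8x8 block is scanned in
 * 2x2 sub-blocks, four coefficients at a time, matching the 4-bit coded
 * coefficient patterns (CCPs) below.
 */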
static const uint8_t scantab[64]={
    0x00,0x08,0x01,0x09,0x10,0x18,0x11,0x19,
    0x02,0x0A,0x03,0x0B,0x12,0x1A,0x13,0x1B,
    0x04,0x0C,0x05,0x0D,0x20,0x28,0x21,0x29,
    0x06,0x0E,0x07,0x0F,0x14,0x1C,0x15,0x1D,
    0x22,0x2A,0x23,0x2B,0x30,0x38,0x31,0x39,
    0x16,0x1E,0x17,0x1F,0x24,0x2C,0x25,0x2D,
    0x32,0x3A,0x33,0x3B,0x26,0x2E,0x27,0x2F,
    0x34,0x3C,0x35,0x3D,0x36,0x3E,0x37,0x3F,
};

static const uint8_t ccp_tab[17][2]={
    {0x2,2}, {0x7,5}, {0xB,5}, {0x3,5},
    {0xD,5}, {0x5,5}, {0x9,5}, {0x1,5},
    {0xE,5}, {0x6,5}, {0xA,5}, {0x2,5},
    {0xC,5}, {0x4,5}, {0x8,5}, {0x3,2},
    {0xF,5}, //EOB
};

static const uint8_t level_tab[7][2]={
    {3,4}, {3,3}, {3,2}, {0,3}, {2,2}, {2,3}, {2,4}
};

static const uint8_t dc_ccp_tab[8][2]={
    {0x1,2}, {0xD,4}, {0xF,4}, {0xC,4},
    {0x5,3}, {0xE,4}, {0x4,3}, {0x0,2},
};

static const uint8_t ac_ccp_tab[16][2]={
    {0x00,2}, {0x3B,6}, {0x0A,4}, {0x3A,6},
    {0x02,3}, {0x39,6}, {0x3C,6}, {0x38,6},
    {0x03,3}, {0x3D,6}, {0x08,4}, {0x1F,5},
    {0x09,4}, {0x0B,4}, {0x0D,4}, {0x0C,4},
};

static const uint8_t asv2_level_tab[63][2]={
    {0x3F,10},{0x2F,10},{0x37,10},{0x27,10},{0x3B,10},{0x2B,10},{0x33,10},{0x23,10},
    {0x3D,10},{0x2D,10},{0x35,10},{0x25,10},{0x39,10},{0x29,10},{0x31,10},{0x21,10},
    {0x1F, 8},{0x17, 8},{0x1B, 8},{0x13, 8},{0x1D, 8},{0x15, 8},{0x19, 8},{0x11, 8},
    {0x0F, 6},{0x0B, 6},{0x0D, 6},{0x09, 6},
    {0x07, 4},{0x05, 4},
    {0x03, 2},
    {0x00, 5},
    {0x02, 2},
    {0x04, 4},{0x06, 4},
    {0x08, 6},{0x0C, 6},{0x0A, 6},{0x0E, 6},
    {0x10, 8},{0x18, 8},{0x14, 8},{0x1C, 8},{0x12, 8},{0x1A, 8},{0x16, 8},{0x1E, 8},
    {0x20,10},{0x30,10},{0x28,10},{0x38,10},{0x24,10},{0x34,10},{0x2C,10},{0x3C,10},
    {0x22,10},{0x32,10},{0x2A,10},{0x3A,10},{0x26,10},{0x36,10},{0x2E,10},{0x3E,10},
};
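
/* VLC tables are shared by all codec instances and built once on first init. */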
static VLC ccp_vlc;
static VLC level_vlc;
static VLC dc_ccp_vlc;
static VLC ac_ccp_vlc;
static VLC asv2_level_vlc;

static av_cold void init_vlcs(ASV1Context *a){
    static int done = 0;

    if (!done) {
        done = 1;

        INIT_VLC_STATIC(&ccp_vlc, VLC_BITS, 17,
                        &ccp_tab[0][1], 2, 1,
                        &ccp_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&dc_ccp_vlc, VLC_BITS, 8,
                        &dc_ccp_tab[0][1], 2, 1,
                        &dc_ccp_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&ac_ccp_vlc, VLC_BITS, 16,
                        &ac_ccp_tab[0][1], 2, 1,
                        &ac_ccp_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&level_vlc, VLC_BITS, 7,
                        &level_tab[0][1], 2, 1,
                        &level_tab[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&asv2_level_vlc, ASV2_LEVEL_VLC_BITS, 63,
                        &asv2_level_tab[0][1], 2, 1,
                        &asv2_level_tab[0][0], 2, 1, 1024);
    }
}
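
/*
 * ASV2 stores its bitstream with the bit order reversed: decode_frame()
 * bit-reverses every input byte, and these helpers reverse the bits of each
 * value again, hence the FIXME below about the double reverse.
 */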
//FIXME write a reversed bitstream reader to avoid the double reverse
static inline int asv2_get_bits(GetBitContext *gb, int n){
    return av_reverse[ get_bits(gb, n) << (8-n) ];
}

static inline void asv2_put_bits(PutBitContext *pb, int n, int v){
    put_bits(pb, n, av_reverse[ v << (8-n) ]);
}

static inline int asv1_get_level(GetBitContext *gb){
    int code= get_vlc2(gb, level_vlc.table, VLC_BITS, 1);

    if(code==3) return get_sbits(gb, 8);
    else        return code - 3;
}

static inline int asv2_get_level(GetBitContext *gb){
    int code= get_vlc2(gb, asv2_level_vlc.table, ASV2_LEVEL_VLC_BITS, 1);

    if(code==31) return (int8_t)asv2_get_bits(gb, 8);
    else         return code - 31;
}

static inline void asv1_put_level(PutBitContext *pb, int level){
    unsigned int index= level + 3;

    if(index <= 6) put_bits(pb, level_tab[index][1], level_tab[index][0]);
    else{
        put_bits(pb, level_tab[3][1], level_tab[3][0]);
        put_sbits(pb, 8, level);
    }
}

static inline void asv2_put_level(PutBitContext *pb, int level){
    unsigned int index= level + 31;

    if(index <= 62) put_bits(pb, asv2_level_tab[index][1], asv2_level_tab[index][0]);
    else{
        put_bits(pb, asv2_level_tab[31][1], asv2_level_tab[31][0]);
        asv2_put_bits(pb, 8, level&0xFF);
    }
}
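
/*
 * Decode one 8x8 block (ASV1): an 8-bit DC value, then up to ten CCP-coded
 * groups of four AC coefficients, terminated by the EOB code (ccp == 16).
 */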
static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64]){
    int i;

    block[0]= 8*get_bits(&a->gb, 8);

    for(i=0; i<11; i++){
        const int ccp= get_vlc2(&a->gb, ccp_vlc.table, VLC_BITS, 1);

        if(ccp){
            if(ccp == 16) break;
            if(ccp < 0 || i>=10){
                av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
                return -1;
            }

            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
        }
    }

    return 0;
}
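
/*
 * Decode one 8x8 block (ASV2): a 4-bit count of coefficient groups and an
 * 8-bit DC value, then a DC CCP covering the first three AC coefficients,
 * followed by one AC CCP per remaining group.
 */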
static inline int asv2_decode_block(ASV1Context *a, DCTELEM block[64]){
    int i, count, ccp;

    count= asv2_get_bits(&a->gb, 4);

    block[0]= 8*asv2_get_bits(&a->gb, 8);

    ccp= get_vlc2(&a->gb, dc_ccp_vlc.table, VLC_BITS, 1);
    if(ccp){
        if(ccp&4) block[a->scantable.permutated[1]]= (asv2_get_level(&a->gb) * a->intra_matrix[1])>>4;
        if(ccp&2) block[a->scantable.permutated[2]]= (asv2_get_level(&a->gb) * a->intra_matrix[2])>>4;
        if(ccp&1) block[a->scantable.permutated[3]]= (asv2_get_level(&a->gb) * a->intra_matrix[3])>>4;
    }

    for(i=1; i<count+1; i++){
        const int ccp= get_vlc2(&a->gb, ac_ccp_vlc.table, VLC_BITS, 1);

        if(ccp){
            if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
            if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
            if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
            if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
        }
    }

    return 0;
}
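
/*
 * The encoders mirror the decoders: the DC value is written directly, while
 * AC coefficients are quantized with q_intra_matrix (a 16.16 fixed-point
 * multiplier, with rounding), grouped into 2x2 sub-blocks and written as a
 * CCP code plus the non-zero levels.
 */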
static inline void asv1_encode_block(ASV1Context *a, DCTELEM block[64]){
    int i;
    int nc_count=0;

    put_bits(&a->pb, 8, (block[0] + 32)>>6);
    block[0]= 0;

    for(i=0; i<10; i++){
        const int index= scantab[4*i];
        int ccp=0;

        if( (block[index + 0] = (block[index + 0]*a->q_intra_matrix[index + 0] + (1<<15))>>16) ) ccp |= 8;
        if( (block[index + 8] = (block[index + 8]*a->q_intra_matrix[index + 8] + (1<<15))>>16) ) ccp |= 4;
        if( (block[index + 1] = (block[index + 1]*a->q_intra_matrix[index + 1] + (1<<15))>>16) ) ccp |= 2;
        if( (block[index + 9] = (block[index + 9]*a->q_intra_matrix[index + 9] + (1<<15))>>16) ) ccp |= 1;

        if(ccp){
            for(;nc_count; nc_count--)
                put_bits(&a->pb, ccp_tab[0][1], ccp_tab[0][0]);

            put_bits(&a->pb, ccp_tab[ccp][1], ccp_tab[ccp][0]);

            if(ccp&8) asv1_put_level(&a->pb, block[index + 0]);
            if(ccp&4) asv1_put_level(&a->pb, block[index + 8]);
            if(ccp&2) asv1_put_level(&a->pb, block[index + 1]);
            if(ccp&1) asv1_put_level(&a->pb, block[index + 9]);
        }else{
            nc_count++;
        }
    }
    put_bits(&a->pb, ccp_tab[16][1], ccp_tab[16][0]);
}

static inline void asv2_encode_block(ASV1Context *a, DCTELEM block[64]){
    int i;
    int count=0;

    for(count=63; count>3; count--){
        const int index= scantab[count];

        if( (block[index]*a->q_intra_matrix[index] + (1<<15))>>16 )
            break;
    }

    count >>= 2;

    asv2_put_bits(&a->pb, 4, count);
    asv2_put_bits(&a->pb, 8, (block[0] + 32)>>6);
    block[0]= 0;

    for(i=0; i<=count; i++){
        const int index= scantab[4*i];
        int ccp=0;

        if( (block[index + 0] = (block[index + 0]*a->q_intra_matrix[index + 0] + (1<<15))>>16) ) ccp |= 8;
        if( (block[index + 8] = (block[index + 8]*a->q_intra_matrix[index + 8] + (1<<15))>>16) ) ccp |= 4;
        if( (block[index + 1] = (block[index + 1]*a->q_intra_matrix[index + 1] + (1<<15))>>16) ) ccp |= 2;
        if( (block[index + 9] = (block[index + 9]*a->q_intra_matrix[index + 9] + (1<<15))>>16) ) ccp |= 1;

        av_assert2(i || ccp<8);

        if(i) put_bits(&a->pb, ac_ccp_tab[ccp][1], ac_ccp_tab[ccp][0]);
        else  put_bits(&a->pb, dc_ccp_tab[ccp][1], dc_ccp_tab[ccp][0]);

        if(ccp){
            if(ccp&8) asv2_put_level(&a->pb, block[index + 0]);
            if(ccp&4) asv2_put_level(&a->pb, block[index + 8]);
            if(ccp&2) asv2_put_level(&a->pb, block[index + 1]);
            if(ccp&1) asv2_put_level(&a->pb, block[index + 9]);
        }
    }
}
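
/* A macroblock consists of six 8x8 blocks: four luma and two chroma (4:2:0). */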
static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){
    int i;

    a->dsp.clear_blocks(block[0]);

    if(a->avctx->codec_id == AV_CODEC_ID_ASV1){
        for(i=0; i<6; i++){
            if( asv1_decode_block(a, block[i]) < 0)
                return -1;
        }
    }else{
        for(i=0; i<6; i++){
            if( asv2_decode_block(a, block[i]) < 0)
                return -1;
        }
    }
    return 0;
}

#define MAX_MB_SIZE (30*16*16*3/2/8)

static inline int encode_mb(ASV1Context *a, DCTELEM block[6][64]){
    int i;

    if (a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < MAX_MB_SIZE) {
        av_log(a->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    if(a->avctx->codec_id == AV_CODEC_ID_ASV1){
        for(i=0; i<6; i++)
            asv1_encode_block(a, block[i]);
    }else{
        for(i=0; i<6; i++)
            asv2_encode_block(a, block[i]);
    }
    return 0;
}
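
/*
 * idct_put() writes the inverse-transformed macroblock into the picture;
 * dct_get() fetches the macroblock's pixels and forward-transforms them.
 * Chroma is skipped when CODEC_FLAG_GRAY is set.
 */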
static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];

    uint8_t *dest_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.idct_put(dest_y                 , linesize, block[0]);
    a->dsp.idct_put(dest_y              + 8, linesize, block[1]);
    a->dsp.idct_put(dest_y + 8*linesize    , linesize, block[2]);
    a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
        a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
    }
}

static inline void dct_get(ASV1Context *a, int mb_x, int mb_y){
    DCTELEM (*block)[64]= a->block;
    int linesize= a->picture.linesize[0];
    int i;

    uint8_t *ptr_y  = a->picture.data[0] + (mb_y * 16* linesize              ) + mb_x * 16;
    uint8_t *ptr_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
    uint8_t *ptr_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;

    a->dsp.get_pixels(block[0], ptr_y                 , linesize);
    a->dsp.get_pixels(block[1], ptr_y              + 8, linesize);
    a->dsp.get_pixels(block[2], ptr_y + 8*linesize    , linesize);
    a->dsp.get_pixels(block[3], ptr_y + 8*linesize + 8, linesize);
    for(i=0; i<4; i++)
        a->dsp.fdct(block[i]);

    if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
        a->dsp.get_pixels(block[4], ptr_cb, a->picture.linesize[1]);
        a->dsp.get_pixels(block[5], ptr_cr, a->picture.linesize[2]);
        for(i=4; i<6; i++)
            a->dsp.fdct(block[i]);
    }
}
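
/*
 * Per-frame decode entry point. The input is byte-swapped (ASV1) or
 * bit-reversed (ASV2) into bitstream_buffer, then the complete macroblocks
 * are decoded, followed by the partial right column and bottom row.
 */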
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ASV1Context * const a = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= &a->picture;
    int mb_x, mb_y;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
                          buf_size);
    if (!a->bitstream_buffer)
        return AVERROR(ENOMEM);

    if(avctx->codec_id == AV_CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
    else{
        int i;
        for(i=0; i<buf_size; i++)
            a->bitstream_buffer[i]= av_reverse[ buf[i] ];
    }

    init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    *picture   = a->picture;
    *data_size = sizeof(AVPicture);

    emms_c();

    return (get_bits_count(&a->gb)+31)/32*4;
}
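
/*
 * Per-frame encode entry point, the mirror of decode_frame(): the output is
 * padded to a 32-bit boundary and then byte-swapped (ASV1) or bit-reversed
 * (ASV2) in place.
 */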
#if CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    ASV1Context * const a = avctx->priv_data;
    AVFrame * const p= &a->picture;
    int size, ret;
    int mb_x, mb_y;

    if ((ret = ff_alloc_packet2(avctx, pkt, a->mb_height*a->mb_width*MAX_MB_SIZE +
                                FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    init_put_bits(&a->pb, pkt->data, pkt->size);

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            dct_get(a, mb_x, mb_y);
            encode_mb(a, a->block);
        }
    }
    emms_c();

    avpriv_align_put_bits(&a->pb);
    while(put_bits_count(&a->pb)&31)
        put_bits(&a->pb, 8, 0);

    size= put_bits_count(&a->pb)/32;

    if(avctx->codec_id == AV_CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    else{
        int i;
        for(i=0; i<4*size; i++)
            pkt->data[i] = av_reverse[pkt->data[i]];
    }

    pkt->size   = size*4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
#endif /* CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER */
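
/*
 * mb_width/mb_height count all macroblocks, including the partial ones at
 * the right and bottom edges; mb_width2/mb_height2 count only complete ones.
 */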
static av_cold void common_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    ff_dsputil_init(&a->dsp, avctx);

    a->mb_width   = (avctx->width  + 15) / 16;
    a->mb_height  = (avctx->height + 15) / 16;
    a->mb_width2  = (avctx->width  + 0) / 16;
    a->mb_height2 = (avctx->height + 0) / 16;

    avctx->coded_frame= &a->picture;
    a->avctx= avctx;
}

static av_cold int decode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    AVFrame *p= &a->picture;
    int i;
    const int scale= avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);
    init_vlcs(a);
    ff_init_scantable(a->dsp.idct_permutation, &a->scantable, scantab);
    avctx->pix_fmt= PIX_FMT_YUV420P;

    if(avctx->extradata_size < 1 || (a->inv_qscale= avctx->extradata[0]) == 0){
        av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
        if(avctx->codec_id == AV_CODEC_ID_ASV1)
            a->inv_qscale= 6;
        else
            a->inv_qscale= 10;
    }

    for(i=0; i<64; i++){
        int index= scantab[i];

        a->intra_matrix[i]= 64*scale*ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
    }

    p->qstride= a->mb_width;
    p->qscale_table= av_malloc( p->qstride * a->mb_height);
    p->quality= (32*scale + a->inv_qscale/2)/a->inv_qscale;
    memset(p->qscale_table, p->quality, p->qstride*a->mb_height);

    return 0;
}
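
/*
 * The encoder derives inv_qscale from global_quality and stores it, together
 * with the "ASUS" tag, in the 8-byte extradata that the decoder reads back.
 */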
#if CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER
static av_cold int encode_init(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;
    int i;
    const int scale= avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;

    common_init(avctx);

    if(avctx->global_quality == 0) avctx->global_quality= 4*FF_QUALITY_SCALE;

    a->inv_qscale= (32*scale*FF_QUALITY_SCALE + avctx->global_quality/2) / avctx->global_quality;

    avctx->extradata= av_mallocz(8);
    avctx->extradata_size=8;
    ((uint32_t*)avctx->extradata)[0]= av_le2ne32(a->inv_qscale);
    ((uint32_t*)avctx->extradata)[1]= av_le2ne32(AV_RL32("ASUS"));

    for(i=0; i<64; i++){
        int q= 32*scale*ff_mpeg1_default_intra_matrix[i];
        a->q_intra_matrix[i]= ((a->inv_qscale<<16) + q/2) / q;
    }

    return 0;
}
#endif /* CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER */

static av_cold int decode_end(AVCodecContext *avctx){
    ASV1Context * const a = avctx->priv_data;

    av_freep(&a->bitstream_buffer);
    av_freep(&a->picture.qscale_table);
    a->bitstream_buffer_size=0;

    if(a->picture.data[0])
        avctx->release_buffer(avctx, &a->picture);

    return 0;
}

#if CONFIG_ASV1_DECODER
AVCodec ff_asv1_decoder = {
    .name           = "asv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ASV1,
    .priv_data_size = sizeof(ASV1Context),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V1"),
};
#endif

#if CONFIG_ASV2_DECODER
AVCodec ff_asv2_decoder = {
    .name           = "asv2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ASV2,
    .priv_data_size = sizeof(ASV1Context),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V2"),
};
#endif

#if CONFIG_ASV1_ENCODER
AVCodec ff_asv1_encoder = {
    .name           = "asv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ASV1,
    .priv_data_size = sizeof(ASV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V1"),
};
#endif

#if CONFIG_ASV2_ENCODER
AVCodec ff_asv2_encoder = {
    .name           = "asv2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ASV2,
    .priv_data_size = sizeof(ASV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("ASUS V2"),
};
#endif