/*
 * Copyright (c) 2002 The FFmpeg Project.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/**
 * @file wmv2.c
 * wmv2 codec.
 */

#include "simple_idct.h"
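
/* macroblock-skip coding modes, read once per P frame by parse_mb_skip() */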
#define SKIP_TYPE_NONE 0
#define SKIP_TYPE_MPEG 1
#define SKIP_TYPE_ROW  2
#define SKIP_TYPE_COL  3

typedef struct Wmv2Context{
    MpegEncContext s;
    int j_type_bit;
    int j_type;
    int flag3;
    int flag63;
    int abt_flag;
    int abt_type;
    int abt_type_table[6];
    int per_mb_abt;
    int per_block_abt;
    int mspel_bit;
    int cbp_table_index;
    int top_left_mv_flag;
    int per_mb_rl_bit;
    int skip_type;
    int hshift;

    ScanTable abt_scantable[2];
    DCTELEM abt_block2[6][64] __align8;
}Wmv2Context;
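
/* shared encoder/decoder init: build the two ABT scantables */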
static void wmv2_common_init(Wmv2Context * w){
    MpegEncContext * const s= &w->s;

    ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[0], wmv2_scantableA);
    ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[1], wmv2_scantableB);
}

#ifdef CONFIG_ENCODERS
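
/* write the 4 byte extradata header: frame rate, bit rate and the
   capability flags that decode_ext_header() reads back */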
static int encode_ext_header(Wmv2Context *w){
    MpegEncContext * const s= &w->s;
    PutBitContext pb;
    int code;

    init_put_bits(&pb, s->avctx->extradata, s->avctx->extradata_size);

    put_bits(&pb, 5, s->avctx->frame_rate / s->avctx->frame_rate_base); //yes 29.97 -> 29
    put_bits(&pb, 11, FFMIN(s->bit_rate/1024, 2047));

    put_bits(&pb, 1, w->mspel_bit=1);
    put_bits(&pb, 1, w->flag3=1);
    put_bits(&pb, 1, w->abt_flag=1);
    put_bits(&pb, 1, w->j_type_bit=1);
    put_bits(&pb, 1, w->top_left_mv_flag=0);
    put_bits(&pb, 1, w->per_mb_rl_bit=1);
    put_bits(&pb, 3, code=1);

    flush_put_bits(&pb);

    s->slice_height = s->mb_height / code;

    return 0;
}

static int wmv2_encode_init(AVCodecContext *avctx){
    Wmv2Context * const w= avctx->priv_data;

    if(MPV_encode_init(avctx) < 0)
        return -1;

    wmv2_common_init(w);

    avctx->extradata_size= 4;
    avctx->extradata= av_mallocz(avctx->extradata_size + 10);
    encode_ext_header(w);

    return 0;
}

static int wmv2_encode_end(AVCodecContext *avctx){
    if(MPV_encode_end(avctx) < 0)
        return -1;

    avctx->extradata_size= 0;
    av_freep(&avctx->extradata);

    return 0;
}
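
/* write the picture header; j_type, mspel, per-MB ABT and per-MB RL
   coding are always switched off by this encoder */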
int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)
{
    Wmv2Context * const w= (Wmv2Context*)s;

    put_bits(&s->pb, 1, s->pict_type - 1);
    if(s->pict_type == I_TYPE){
        put_bits(&s->pb, 7, 0);
    }
    put_bits(&s->pb, 5, s->qscale);

    s->dc_table_index = 1;
    s->mv_table_index = 1; /* only if P frame */
//    s->use_skip_mb_code = 1; /* only if P frame */
    s->per_mb_rl_table = 0;
    s->mspel= 0;
    w->per_mb_abt=0;
    w->abt_type=0;
    w->j_type=0;

    assert(s->flipflop_rounding);

    if (s->pict_type == I_TYPE) {
        assert(s->no_rounding==1);
        if(w->j_type_bit) put_bits(&s->pb, 1, w->j_type);

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            code012(&s->pb, s->rl_chroma_table_index);
            code012(&s->pb, s->rl_table_index);
        }

        put_bits(&s->pb, 1, s->dc_table_index);

        s->inter_intra_pred= 0;
    }else{
        int cbp_index;

        put_bits(&s->pb, 2, SKIP_TYPE_NONE);

        code012(&s->pb, cbp_index=0);
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) put_bits(&s->pb, 1, s->mspel);

        if(w->abt_flag){
            put_bits(&s->pb, 1, w->per_mb_abt^1);
            if(!w->per_mb_abt){
                code012(&s->pb, w->abt_type);
            }
        }

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            code012(&s->pb, s->rl_table_index);
            s->rl_chroma_table_index = s->rl_table_index;
        }
        put_bits(&s->pb, 1, s->dc_table_index);
        put_bits(&s->pb, 1, s->mv_table_index);

        s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    return 0;
}

// nearly identical to wmv1 but that's just because we don't use the useless M$ crap features
// it's duplicated here in case someone wants to add support for these crap features
void ff_wmv2_encode_mb(MpegEncContext * s,
                       DCTELEM block[6][64],
                       int motion_x, int motion_y)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, coded_cbp, i;
    int pred_x, pred_y;
    uint8_t *coded_block;

    handle_slices(s);

    if (!s->mb_intra) {
        /* compute cbp */
        set_stat(ST_INTER_MB);
        cbp = 0;
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }

        put_bits(&s->pb,
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][1],
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][0]);

        /* motion vector */
        h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
        msmpeg4_encode_motion(s, motion_x - pred_x,
                              motion_y - pred_y);
    } else {
        /* compute cbp */
        cbp = 0;
        coded_cbp = 0;
        for (i = 0; i < 6; i++) {
            int val, pred;
            val = (s->block_last_index[i] >= 1);
            cbp |= val << (5 - i);
            if (i < 4) {
                /* predict value for close blocks only for luma */
                pred = coded_block_pred(s, i, &coded_block);
                *coded_block = val;
                val = val ^ pred;
            }
            coded_cbp |= val << (5 - i);
        }
#if 0
        if (coded_cbp)
            printf("cbp=%x %x\n", cbp, coded_cbp);
#endif

        if (s->pict_type == I_TYPE) {
            set_stat(ST_INTRA_MB);
            put_bits(&s->pb,
                     table_mb_intra[coded_cbp][1], table_mb_intra[coded_cbp][0]);
        } else {
            put_bits(&s->pb,
                     wmv2_inter_table[w->cbp_table_index][cbp][1],
                     wmv2_inter_table[w->cbp_table_index][cbp][0]);
        }
        set_stat(ST_INTRA_MB);
        put_bits(&s->pb, 1, 0); /* no AC prediction yet */
        if(s->inter_intra_pred){
            s->h263_aic_dir=0;
            put_bits(&s->pb, table_inter_intra[s->h263_aic_dir][1], table_inter_intra[s->h263_aic_dir][0]);
        }
    }

    for (i = 0; i < 6; i++) {
        msmpeg4_encode_block(s, block[i], i);
    }
}

#endif //CONFIG_ENCODERS
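
/* read the per-frame skip information (none / per MB / per row / per column)
   into current_picture_ptr->mb_type */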
static void parse_mb_skip(Wmv2Context * w){
    int mb_x, mb_y;
    MpegEncContext * const s= &w->s;
    uint32_t * const mb_type= s->current_picture_ptr->mb_type;

    w->skip_type= get_bits(&s->gb, 2);
    switch(w->skip_type){
    case SKIP_TYPE_NONE:
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_16x16 | MB_TYPE_L0;
            }
        }
        break;
    case SKIP_TYPE_MPEG:
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
            }
        }
        break;
    case SKIP_TYPE_ROW:
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            if(get_bits1(&s->gb)){
                for(mb_x=0; mb_x<s->mb_width; mb_x++){
                    mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }else{
                for(mb_x=0; mb_x<s->mb_width; mb_x++){
                    mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }
        }
        break;
    case SKIP_TYPE_COL:
        for(mb_x=0; mb_x<s->mb_width; mb_x++){
            if(get_bits1(&s->gb)){
                for(mb_y=0; mb_y<s->mb_height; mb_y++){
                    mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }else{
                for(mb_y=0; mb_y<s->mb_height; mb_y++){
                    mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }
        }
        break;
    }
}
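
/* parse the sequence header stored in extradata (the counterpart of
   encode_ext_header()) */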
static int decode_ext_header(Wmv2Context *w){
    MpegEncContext * const s= &w->s;
    GetBitContext gb;
    int fps;
    int code;

    if(s->avctx->extradata_size<4) return -1;

    init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8);

    fps                = get_bits(&gb, 5);
    s->bit_rate        = get_bits(&gb, 11)*1024;
    w->mspel_bit       = get_bits1(&gb);
    w->flag3           = get_bits1(&gb);
    w->abt_flag        = get_bits1(&gb);
    w->j_type_bit      = get_bits1(&gb);
    w->top_left_mv_flag= get_bits1(&gb);
    w->per_mb_rl_bit   = get_bits1(&gb);
    code               = get_bits(&gb, 3);

    if(code==0) return -1;

    s->slice_height = s->mb_height / code;

    if(s->avctx->debug&FF_DEBUG_PICT_INFO){
        av_log(s->avctx, AV_LOG_DEBUG, "fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, flag3:%d, slices:%d\n",
               fps, s->bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit, w->top_left_mv_flag, w->per_mb_rl_bit, code, w->flag3,
               code);
    }
    return 0;
}

int ff_wmv2_decode_picture_header(MpegEncContext * s)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int code;

#if 0
{
    int i;
    for(i=0; i<s->gb.size*8; i++)
        printf("%d", get_bits1(&s->gb));
//    get_bits1(&s->gb);
    printf("END\n");
    return -1;
}
#endif
    if(s->picture_number==0)
        decode_ext_header(w);

    s->pict_type = get_bits(&s->gb, 1) + 1;
    if(s->pict_type == I_TYPE){
        code = get_bits(&s->gb, 7);
        av_log(s->avctx, AV_LOG_ERROR, "I7:%X/\n", code);
    }
    s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
    if(s->qscale < 0)
        return -1;

    return 0;
}
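
/* decode the remaining picture header fields: table indices, mspel/ABT
   flags and, for P frames, the macroblock skip info */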
int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s)
{
    Wmv2Context * const w= (Wmv2Context*)s;

    if (s->pict_type == I_TYPE) {
        if(w->j_type_bit) w->j_type= get_bits1(&s->gb);
        else              w->j_type= 0; //FIXME check

        if(!w->j_type){
            if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
            else                 s->per_mb_rl_table= 0;

            if(!s->per_mb_rl_table){
                s->rl_chroma_table_index = decode012(&s->gb);
                s->rl_table_index = decode012(&s->gb);
            }

            s->dc_table_index = get_bits1(&s->gb);
        }
        s->inter_intra_pred= 0;
        s->no_rounding = 1;
        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n",
                   s->qscale,
                   s->rl_chroma_table_index,
                   s->rl_table_index,
                   s->dc_table_index,
                   s->per_mb_rl_table,
                   w->j_type);
        }
    }else{
        int cbp_index;
        w->j_type=0;

        parse_mb_skip(w);
        cbp_index= decode012(&s->gb);
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) s->mspel= get_bits1(&s->gb);
        else             s->mspel= 0; //FIXME check

        if(w->abt_flag){
            w->per_mb_abt= get_bits1(&s->gb)^1;
            if(!w->per_mb_abt){
                w->abt_type= decode012(&s->gb);
            }
        }

        if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
        else                 s->per_mb_rl_table= 0;

        if(!s->per_mb_rl_table){
            s->rl_table_index = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        s->dc_table_index = get_bits1(&s->gb);
        s->mv_table_index = get_bits1(&s->gb);

        s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
        s->no_rounding ^= 1;

        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n",
                   s->rl_table_index,
                   s->rl_chroma_table_index,
                   s->dc_table_index,
                   s->mv_table_index,
                   s->per_mb_rl_table,
                   s->qscale,
                   s->mspel,
                   w->per_mb_abt,
                   w->abt_type,
                   w->cbp_table_index,
                   s->inter_intra_pred);
        }
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    s->picture_number++; //FIXME ?

//    if(w->j_type)
//        return wmv2_decode_j_picture(w); //FIXME

    if(w->j_type){
        av_log(s->avctx, AV_LOG_ERROR, "J-type picture isn't supported\n");
        return -1;
    }

    return 0;
}
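
/* decode a motion vector; if mspel is enabled an extra hshift bit follows
   vectors with an odd component */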
static inline int wmv2_decode_motion(Wmv2Context *w, int *mx_ptr, int *my_ptr){
    MpegEncContext * const s= &w->s;
    int ret;

    ret= msmpeg4_decode_motion(s, mx_ptr, my_ptr);

    if(ret<0) return -1;

    if((((*mx_ptr)|(*my_ptr)) & 1) && s->mspel)
        w->hshift= get_bits1(&s->gb);
    else
        w->hshift= 0;

//    printf("%d %d ", *mx_ptr, *my_ptr);

    return 0;
}
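
/* predict the MV from the left (A), top (B) and top-right (C) neighbours;
   with top_left_mv_flag set, a bit may select A or B directly */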
static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
    MpegEncContext * const s= &w->s;
    int xy, wrap, diff, type;
    int16_t *A, *B, *C, *mot_val;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    mot_val = s->current_picture.motion_val[0][xy];

    A = s->current_picture.motion_val[0][xy - 1];
    B = s->current_picture.motion_val[0][xy - wrap];
    C = s->current_picture.motion_val[0][xy + 2 - wrap];

    diff= FFMAX(ABS(A[0] - B[0]), ABS(A[1] - B[1]));

    if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag && diff >= 8)
        type= get_bits1(&s->gb);
    else
        type= 2;

    if(type == 0){
        *px= A[0];
        *py= A[1];
    }else if(type == 1){
        *px= B[0];
        *py= B[1];
    }else{
        /* special case for first (slice) line */
        if (s->first_slice_line) {
            *px = A[0];
            *py = A[1];
        } else {
            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    }

    return mot_val;
}
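
/* decode one inter block; with ABT the block is split into two 8x4 or 4x8
   halves coded with the dedicated scantables (second half in abt_block2) */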
static inline int wmv2_decode_inter_block(Wmv2Context *w, DCTELEM *block, int n, int cbp){
    MpegEncContext * const s= &w->s;
    static const int sub_cbp_table[3]= {2,3,1};
    int sub_cbp;

    if(!cbp){
        s->block_last_index[n] = -1;
        return 0;
    }

    if(w->per_block_abt)
        w->abt_type= decode012(&s->gb);
#if 0
    if(w->per_block_abt)
        printf("B%d", w->abt_type);
#endif
    w->abt_type_table[n]= w->abt_type;

    if(w->abt_type){
//        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].permutated;
        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].scantable;
//        const uint8_t *scantable= w->abt_type-1 ? w->abt_scantable[1].permutated : w->abt_scantable[0].scantable;

        sub_cbp= sub_cbp_table[ decode012(&s->gb) ];
//        printf("S%d", sub_cbp);

        if(sub_cbp&1){
            if (msmpeg4_decode_block(s, block, n, 1, scantable) < 0)
                return -1;
        }

        if(sub_cbp&2){
            if (msmpeg4_decode_block(s, w->abt_block2[n], n, 1, scantable) < 0)
                return -1;
        }
        s->block_last_index[n] = 63;

        return 0;
    }else{
        return msmpeg4_decode_block(s, block, n, 1, s->inter_scantable.permutated);
    }
}
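
/* add the decoded residual to the prediction; ABT blocks go through the
   8x4 / 4x8 simple IDCTs, plain blocks through dsp.idct_add */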
static void wmv2_add_block(Wmv2Context *w, DCTELEM *block1, uint8_t *dst, int stride, int n){
    MpegEncContext * const s= &w->s;

    switch(w->abt_type_table[n]){
    case 0:
        if (s->block_last_index[n] >= 0) {
            s->dsp.idct_add (dst, stride, block1);
        }
        break;
    case 1:
        simple_idct84_add(dst           , stride, block1);
        simple_idct84_add(dst + 4*stride, stride, w->abt_block2[n]);
        memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
        break;
    case 2:
        simple_idct48_add(dst           , stride, block1);
        simple_idct48_add(dst + 4       , stride, w->abt_block2[n]);
        memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR, "internal error in WMV2 abt\n");
    }
}

void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr){
    Wmv2Context * const w= (Wmv2Context*)s;

    wmv2_add_block(w, block1[0], dest_y                    , s->linesize, 0);
    wmv2_add_block(w, block1[1], dest_y + 8                , s->linesize, 1);
    wmv2_add_block(w, block1[2], dest_y +     8*s->linesize, s->linesize, 2);
    wmv2_add_block(w, block1[3], dest_y + 8 + 8*s->linesize, s->linesize, 3);

    if(s->flags&CODEC_FLAG_GRAY) return;

    wmv2_add_block(w, block1[4], dest_cb                   , s->uvlinesize, 4);
    wmv2_add_block(w, block1[5], dest_cr                   , s->uvlinesize, 5);
}
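
/* mspel motion compensation: luma is filtered with put_mspel_pixels_tab
   (hshift selects the filter variant), chroma uses ordinary half-pel pix_op */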
void ff_mspel_motion(MpegEncContext *s,
                     uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                     uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                     int motion_x, int motion_y, int h)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    uint8_t *ptr;
    int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize;
    int emu=0;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    dxy = 2*dxy + w->hshift;
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = s->mb_y * 16 + (motion_y >> 1);

    /* WARNING: do not forget half pels */
    v_edge_pos = s->v_edge_pos;
    src_x = clip(src_x, -16, s->width);
    src_y = clip(src_y, -16, s->height);
    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;
    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(src_x<1 || src_y<1 || src_x + 17  >= s->h_edge_pos
                              || src_y + h+1 >= v_edge_pos){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19,
                                src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer + 1 + s->linesize;
            emu=1;
        }
    }

    s->dsp.put_mspel_pixels_tab[dxy](dest_y             , ptr             , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8           , ptr+8           , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y  +8*linesize, ptr  +8*linesize, linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize);

    if(s->flags&CODEC_FLAG_GRAY) return;

    if (s->out_format == FMT_H263) {
        dxy = 0;
        if ((motion_x & 3) != 0)
            dxy |= 1;
        if ((motion_y & 3) != 0)
            dxy |= 2;
        mx = motion_x >> 2;
        my = motion_y >> 2;
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;
    }

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = clip(src_y, -8, s->height >> 1);
    if (src_y == (s->height >> 1))
        dxy &= ~2;
    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                            src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                            src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
}
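
/* decode one macroblock: handles skipped, inter and intra macroblocks
   for P and I frames */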
static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, code, i;
    uint8_t *coded_val;

    if(w->j_type) return 0;

    if (s->pict_type == P_TYPE) {
        if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
            /* skip mb */
            s->mb_intra = 0;
            for(i=0;i<6;i++)
                s->block_last_index[i] = -1;
            s->mv_dir = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
            s->mv[0][0][0] = 0;
            s->mv[0][0][1] = 0;
            s->mb_skiped = 1;
            w->hshift=0;
            return 0;
        }

        code = get_vlc2(&s->gb, mb_non_intra_vlc[w->cbp_table_index].table, MB_NON_INTRA_VLC_BITS, 3);
        if (code < 0)
            return -1;
        s->mb_intra = (~code & 0x40) >> 6;

        cbp = code & 0x3f;
    } else {
        s->mb_intra = 1;
        code = get_vlc2(&s->gb, mb_intra_vlc.table, MB_INTRA_VLC_BITS, 2);
        if (code < 0){
            av_log(s->avctx, AV_LOG_ERROR, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        /* predict coded block pattern */
        cbp = 0;
        for(i=0;i<6;i++) {
            int val = ((code >> (5 - i)) & 1);
            if (i < 4) {
                int pred = coded_block_pred(s, i, &coded_val);
                val = val ^ pred;
                *coded_val = val;
            }
            cbp |= val << (5 - i);
        }
    }

    if (!s->mb_intra) {
        int mx, my;
//        printf("P at %d %d\n", s->mb_x, s->mb_y);
        wmv2_pred_motion(w, &mx, &my);

        if(cbp){
            if(s->per_mb_rl_table){
                s->rl_table_index = decode012(&s->gb);
                s->rl_chroma_table_index = s->rl_table_index;
            }

            if(w->abt_flag && w->per_mb_abt){
                w->per_block_abt= get_bits1(&s->gb);
                if(!w->per_block_abt)
                    w->abt_type= decode012(&s->gb);
            }else
                w->per_block_abt=0;
        }

        if (wmv2_decode_motion(w, &mx, &my) < 0)
            return -1;

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        s->mv[0][0][0] = mx;
        s->mv[0][0][1] = my;

        for (i = 0; i < 6; i++) {
            if (wmv2_decode_inter_block(w, block[i], i, (cbp >> (5 - i)) & 1) < 0)
            {
                av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    } else {
//        if(s->pict_type==P_TYPE)
//            printf("%d%d ", s->inter_intra_pred, cbp);
//        printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
        s->ac_pred = get_bits1(&s->gb);
        if(s->inter_intra_pred){
            s->h263_aic_dir= get_vlc2(&s->gb, inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1);
//            printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y);
        }
        if(s->per_mb_rl_table && cbp){
            s->rl_table_index = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        for (i = 0; i < 6; i++) {
            if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0)
            {
                av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    }

    return 0;
}

static int wmv2_decode_init(AVCodecContext *avctx){
    Wmv2Context * const w= avctx->priv_data;

    if(ff_h263_decode_init(avctx) < 0)
        return -1;

    wmv2_common_init(w);

    return 0;
}

AVCodec wmv2_decoder = {
    "wmv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_WMV2,
    sizeof(Wmv2Context),
    wmv2_decode_init,
    NULL,
    ff_h263_decode_end,
    ff_h263_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
};

#ifdef CONFIG_ENCODERS
AVCodec wmv2_encoder = {
    "wmv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_WMV2,
    sizeof(Wmv2Context),
    wmv2_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};
#endif