/*
 * Copyright (c) 2002 The FFmpeg Project.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

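/* Note (assumption): this file only includes simple_idct.h itself; judging by
 * the identifiers used below (MpegEncContext, the bit reader/writer, the
 * msmpeg4_* helpers), it appears to be compiled together with the
 * msmpeg4/mpegvideo code, which provides the remaining declarations. */
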
#include "simple_idct.h"

#define SKIP_TYPE_NONE 0
#define SKIP_TYPE_MPEG 1
#define SKIP_TYPE_ROW  2
#define SKIP_TYPE_COL  3

typedef struct Wmv2Context{
    MpegEncContext s;
    int j_type_bit;
    int j_type;
    int flag3;
    int flag63;
    int abt_flag;
    int abt_type;
    int abt_type_table[6];
    int per_mb_abt;
    int per_block_abt;
    int mspel_bit;
    int cbp_table_index;
    int top_left_mv_flag;
    int per_mb_rl_bit;
    int skip_type;
    int hshift;

    ScanTable abt_scantable[2];
    DCTELEM abt_block2[6][64] __align8;
}Wmv2Context;

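/* Shared encoder/decoder setup: initializes the two ABT (8x4 / 4x8) scan
 * tables from the wmv2_scantableA/B patterns. */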
static void wmv2_common_init(Wmv2Context * w){
    MpegEncContext * const s= &w->s;

    ff_init_scantable(s, &w->abt_scantable[0], wmv2_scantableA);
    ff_init_scantable(s, &w->abt_scantable[1], wmv2_scantableB);
}

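/* Writes the 4-byte WMV2 sequence ("extradata") header: frame rate, bitrate
 * in kbit/s (capped at 2047) and the capability flags that the decoder reads
 * back in decode_ext_header(). */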
static int encode_ext_header(Wmv2Context *w){
    MpegEncContext * const s= &w->s;
    PutBitContext pb;
    int code;

    init_put_bits(&pb, s->avctx->extradata, s->avctx->extradata_size, NULL, NULL);

    put_bits(&pb, 5, s->frame_rate / FRAME_RATE_BASE); //yes 29.97 -> 29
    put_bits(&pb, 11, FFMIN(s->bit_rate/1024, 2047));

    put_bits(&pb, 1, w->mspel_bit=1);
    put_bits(&pb, 1, w->flag3=1);
    put_bits(&pb, 1, w->abt_flag=1);
    put_bits(&pb, 1, w->j_type_bit=1);
    put_bits(&pb, 1, w->top_left_mv_flag=0);
    put_bits(&pb, 1, w->per_mb_rl_bit=1);
    put_bits(&pb, 3, code=1);

    flush_put_bits(&pb);

    s->slice_height = s->mb_height / code;

    return 0;
}

static int wmv2_encode_init(AVCodecContext *avctx){
    Wmv2Context * const w= avctx->priv_data;

    if(MPV_encode_init(avctx) < 0)
        return -1;

    wmv2_common_init(w);

    avctx->extradata_size= 4;
    avctx->extradata= av_mallocz(avctx->extradata_size + 10);
    encode_ext_header(w);

    return 0;
}

static int wmv2_encode_end(AVCodecContext *avctx){
    Wmv2Context * const w= avctx->priv_data;

    if(MPV_encode_end(avctx) < 0)
        return -1;

    avctx->extradata_size= 0;
    av_freep(&avctx->extradata);

    return 0;
}

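/* Writes the WMV2 picture header: picture type, qscale, the fixed table
 * choices used by this encoder and, for P frames, the skip/CBP/ABT flags. */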
int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)
{
    Wmv2Context * const w= (Wmv2Context*)s;

    put_bits(&s->pb, 1, s->pict_type - 1);
    if(s->pict_type == I_TYPE){
        put_bits(&s->pb, 7, 0);
    }
    put_bits(&s->pb, 5, s->qscale);

    s->dc_table_index = 1;
    s->mv_table_index = 1; /* only if P frame */
//    s->use_skip_mb_code = 1; /* only if P frame */
    s->per_mb_rl_table = 0;
    s->mspel= 0;
    w->per_mb_abt=0;
    w->abt_type=0;
    w->j_type=0;

    if (s->pict_type == I_TYPE) {
        if(w->j_type_bit) put_bits(&s->pb, 1, w->j_type);

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            code012(&s->pb, s->rl_chroma_table_index);
            code012(&s->pb, s->rl_table_index);
        }

        put_bits(&s->pb, 1, s->dc_table_index);

        s->inter_intra_pred= 0;
        s->no_rounding = 1;
    }else{
        int cbp_index;

        put_bits(&s->pb, 2, SKIP_TYPE_NONE);

        code012(&s->pb, cbp_index=0);
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) put_bits(&s->pb, 1, s->mspel);

        if(w->abt_flag){
            put_bits(&s->pb, 1, w->per_mb_abt^1);
            if(!w->per_mb_abt){
                code012(&s->pb, w->abt_type);
            }
        }

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            code012(&s->pb, s->rl_table_index);
            s->rl_chroma_table_index = s->rl_table_index;
        }
        put_bits(&s->pb, 1, s->dc_table_index);
        put_bits(&s->pb, 1, s->mv_table_index);

        s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
        s->no_rounding ^= 1;
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    return 0;
}

/* Nearly identical to the wmv1 version, but only because we do not use the
 * extra M$ features; it is duplicated here in case someone wants to add
 * support for them. */
void ff_wmv2_encode_mb(MpegEncContext * s,
                       DCTELEM block[6][64],
                       int motion_x, int motion_y)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, coded_cbp, i;
    int pred_x, pred_y;
    UINT8 *coded_block;

    handle_slices(s);

    if (!s->mb_intra) {
        /* compute cbp */
        set_stat(ST_INTER_MB);
        cbp = 0;
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }

        put_bits(&s->pb,
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][1],
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][0]);

        /* motion vector */
        h263_pred_motion(s, 0, &pred_x, &pred_y);
        msmpeg4_encode_motion(s, motion_x - pred_x,
                              motion_y - pred_y);
    } else {
        /* compute cbp */
        cbp = 0;
        coded_cbp = 0;
        for (i = 0; i < 6; i++) {
            int val, pred;
            val = (s->block_last_index[i] >= 1);
            cbp |= val << (5 - i);
            if (i < 4) {
                /* predict value for close blocks only for luma */
                pred = coded_block_pred(s, i, &coded_block);
                *coded_block = val;
                val = val ^ pred;
            }
            coded_cbp |= val << (5 - i);
        }
#if 0
        if (coded_cbp)
            printf("cbp=%x %x\n", cbp, coded_cbp);
#endif

        if (s->pict_type == I_TYPE) {
            set_stat(ST_INTRA_MB);
            put_bits(&s->pb,
                     table_mb_intra[coded_cbp][1], table_mb_intra[coded_cbp][0]);
        } else {
            put_bits(&s->pb,
                     wmv2_inter_table[w->cbp_table_index][cbp][1],
                     wmv2_inter_table[w->cbp_table_index][cbp][0]);
        }
        set_stat(ST_INTRA_MB);
        put_bits(&s->pb, 1, 0); /* no AC prediction yet */
        if(s->inter_intra_pred){
            s->h263_aic_dir=0;
            put_bits(&s->pb, table_inter_intra[s->h263_aic_dir][1], table_inter_intra[s->h263_aic_dir][0]);
        }
    }

    for (i = 0; i < 6; i++) {
        msmpeg4_encode_block(s, block[i], i);
    }
}

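/* Reads the per-frame macroblock skip information. Depending on skip_type
 * the bitstream stores nothing, one bit per MB, or run flags per row/column,
 * and the result is expanded into s->mb_type[] as MB_TYPE_SKIPED. */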
static void parse_mb_skip(Wmv2Context * w){
    int mb_x, mb_y;
    MpegEncContext * const s= &w->s;

    w->skip_type= get_bits(&s->gb, 2);
    switch(w->skip_type){
    case SKIP_TYPE_NONE:
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                s->mb_type[mb_y*s->mb_width + mb_x]= 0;
            }
        }
        break;
    case SKIP_TYPE_MPEG:
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                s->mb_type[mb_y*s->mb_width + mb_x]= get_bits1(&s->gb) ? MB_TYPE_SKIPED : 0;
            }
        }
        break;
    case SKIP_TYPE_ROW:
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            if(get_bits1(&s->gb)){
                for(mb_x=0; mb_x<s->mb_width; mb_x++){
                    s->mb_type[mb_y*s->mb_width + mb_x]= MB_TYPE_SKIPED;
                }
            }else{
                for(mb_x=0; mb_x<s->mb_width; mb_x++){
                    s->mb_type[mb_y*s->mb_width + mb_x]= get_bits1(&s->gb) ? MB_TYPE_SKIPED : 0;
                }
            }
        }
        break;
    case SKIP_TYPE_COL:
        for(mb_x=0; mb_x<s->mb_width; mb_x++){
            if(get_bits1(&s->gb)){
                for(mb_y=0; mb_y<s->mb_height; mb_y++){
                    s->mb_type[mb_y*s->mb_width + mb_x]= MB_TYPE_SKIPED;
                }
            }else{
                for(mb_y=0; mb_y<s->mb_height; mb_y++){
                    s->mb_type[mb_y*s->mb_width + mb_x]= get_bits1(&s->gb) ? MB_TYPE_SKIPED : 0;
                }
            }
        }
        break;
    }
}

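/* Parses the 4-byte sequence header stored in extradata (the counterpart of
 * encode_ext_header()): frame rate, bitrate and the per-stream capability
 * flags, plus the slice count used to derive slice_height. */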
static int decode_ext_header(Wmv2Context *w){
    MpegEncContext * const s= &w->s;
    GetBitContext gb;
    int fps;
    int code;

    if(s->avctx->extradata_size<4) return -1;

    init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size);

    fps                = get_bits(&gb, 5);
    s->bit_rate        = get_bits(&gb, 11)*1024;
    w->mspel_bit       = get_bits1(&gb);
    w->flag3           = get_bits1(&gb);
    w->abt_flag        = get_bits1(&gb);
    w->j_type_bit      = get_bits1(&gb);
    w->top_left_mv_flag= get_bits1(&gb);
    w->per_mb_rl_bit   = get_bits1(&gb);
    code               = get_bits(&gb, 3);

    if(code==0) return -1;

    s->slice_height = s->mb_height / code;

    if(s->avctx->debug&FF_DEBUG_PICT_INFO){
        printf("fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, flag3:%d\n",
               fps, s->bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit, w->top_left_mv_flag, w->per_mb_rl_bit, code, w->flag3);
    }
    return 0;
}

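/* Decodes the WMV2 picture header. For I frames this selects the RL/DC
 * tables and the j_type flag; for P frames it additionally parses the MB
 * skip map, CBP table index, mspel and ABT flags, and the MV table. */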
int ff_wmv2_decode_picture_header(MpegEncContext * s)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int code, i;

#if 0
{
    int i;
    for(i=0; i<s->gb.size*8; i++)
        printf("%d", get_bits1(&s->gb));
//    get_bits1(&s->gb);
    printf("END\n");
    return -1;
}
#endif
    if(s->picture_number==0)
        decode_ext_header(w);

    s->pict_type = get_bits(&s->gb, 1) + 1;
    if(s->pict_type == I_TYPE){
        code = get_bits(&s->gb, 7);
        printf("I7:%X/\n", code);
    }
    s->qscale = get_bits(&s->gb, 5);

    if (s->pict_type == I_TYPE) {
        if(w->j_type_bit) w->j_type= get_bits1(&s->gb);
        else              w->j_type= 0; //FIXME check

        if(!w->j_type){
            if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
            else                 s->per_mb_rl_table= 0;

            if(!s->per_mb_rl_table){
                s->rl_chroma_table_index = decode012(&s->gb);
                s->rl_table_index = decode012(&s->gb);
            }

            s->dc_table_index = get_bits1(&s->gb);
        }
        s->inter_intra_pred= 0;
        s->no_rounding = 1;

        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            printf("qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n",
                   s->qscale,
                   s->rl_chroma_table_index,
                   s->rl_table_index,
                   s->dc_table_index,
                   s->per_mb_rl_table,
                   w->j_type);
        }
    }else{
        int cbp_index;
        w->j_type=0;

        parse_mb_skip(w);
        cbp_index= decode012(&s->gb);
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) s->mspel= get_bits1(&s->gb);
        else             s->mspel= 0; //FIXME check

        if(w->abt_flag){
            w->per_mb_abt= get_bits1(&s->gb)^1;
            if(!w->per_mb_abt){
                w->abt_type= decode012(&s->gb);
            }
        }

        if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
        else                 s->per_mb_rl_table= 0;

        if(!s->per_mb_rl_table){
            s->rl_table_index = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        s->dc_table_index = get_bits1(&s->gb);
        s->mv_table_index = get_bits1(&s->gb);

        s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
        s->no_rounding ^= 1;

        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            printf("rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n",
                   s->rl_table_index,
                   s->rl_chroma_table_index,
                   s->dc_table_index,
                   s->mv_table_index,
                   s->per_mb_rl_table,
                   s->qscale,
                   s->mspel,
                   w->per_mb_abt,
                   w->abt_type,
                   w->cbp_table_index,
                   s->inter_intra_pred);
        }
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    if(s->avctx->debug&FF_DEBUG_SKIP){
        for(i=0; i<s->mb_num; i++){
            if(i%s->mb_width==0) printf("\n");
            printf("%d", s->mb_type[i]);
        }
    }
    s->picture_number++; //FIXME ?

//    if(w->j_type)
//        return wmv2_decode_j_picture(w); //FIXME

    if(w->j_type){
        printf("J-type picture is not supported\n");
        return -1;
    }

    return 0;
}

void ff_wmv2_decode_init(MpegEncContext *s){
}

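/* Decodes one motion vector (msmpeg4 style) and, when mspel is enabled and
 * the vector has a half-pel component, the extra hshift bit that is later
 * folded into the interpolation mode in ff_mspel_motion(). */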
static inline int wmv2_decode_motion(Wmv2Context *w, int *mx_ptr, int *my_ptr){
    MpegEncContext * const s= &w->s;
    int ret;

    ret= msmpeg4_decode_motion(s, mx_ptr, my_ptr);

    if(ret<0) return -1;

    if((((*mx_ptr)|(*my_ptr)) & 1) && s->mspel)
        w->hshift= get_bits1(&s->gb);
    else
        w->hshift= 0;

//printf("%d %d ", *mx_ptr, *my_ptr);

    return 0;
}

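/* Motion vector prediction: normally the median of the left (A), top (B)
 * and top-right (C) neighbours. If top_left_mv_flag is set and A and B
 * differ enough, an explicit bit selects A or B directly instead. */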
static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
    MpegEncContext * const s= &w->s;
    int xy, wrap, diff, type;
    INT16 *A, *B, *C, *mot_val;

    wrap = s->block_wrap[0];
    xy = s->block_index[0];

    mot_val = s->motion_val[xy];

    A = s->motion_val[xy - 1];
    B = s->motion_val[xy - wrap];
    C = s->motion_val[xy + 2 - wrap];

    diff= FFMAX(ABS(A[0] - B[0]), ABS(A[1] - B[1]));

    if(s->mb_x && s->mb_y && !s->mspel && w->top_left_mv_flag && diff >= 8)
        //FIXME also read the top/left bit if y!=0 && first_slice_line?
        type= get_bits1(&s->gb);
    else
        type= 2;

    if(type == 0){
        *px= A[0];
        *py= A[1];
    }else if(type == 1){
        *px= B[0];
        *py= B[1];
    }else{
        /* special case for first (slice) line */
        if (s->first_slice_line) {
            *px = A[0];
            *py = A[1];
        } else {
            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    }

    return mot_val;
}

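/* Decodes one inter-coded block. With ABT (adaptive block-size transform)
 * enabled, the block may be split into two 8x4 or 4x8 halves: the first half
 * goes into 'block', the second into w->abt_block2[n], and the two are
 * recombined by the matching IDCT in wmv2_add_block(). */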
static inline int wmv2_decode_inter_block(Wmv2Context *w, DCTELEM *block, int n, int cbp){
    MpegEncContext * const s= &w->s;
    static const int sub_cbp_table[3]= {2,3,1};
    int sub_cbp;

    if(!cbp){
        s->block_last_index[n] = -1;
        return 0;
    }

    if(w->per_block_abt)
        w->abt_type= decode012(&s->gb);
#if 0
    if(w->per_block_abt)
        printf("B%d", w->abt_type);
#endif
    w->abt_type_table[n]= w->abt_type;

    if(w->abt_type){
//        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].permutated;
        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].scantable;
//        const uint8_t *scantable= w->abt_type-1 ? w->abt_scantable[1].permutated : w->abt_scantable[0].scantable;

        sub_cbp= sub_cbp_table[ decode012(&s->gb) ];
//        printf("S%d", sub_cbp);

        if(sub_cbp&1){
            if (msmpeg4_decode_block(s, block, n, 1, scantable) < 0)
                return -1;
        }

        if(sub_cbp&2){
            if (msmpeg4_decode_block(s, w->abt_block2[n], n, 1, scantable) < 0)
                return -1;
        }
        s->block_last_index[n] = 63;

        return 0;
    }else{
        return msmpeg4_decode_block(s, block, n, 1, s->inter_scantable.permutated);
    }
}

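/* Reconstructs one block into the picture: a normal 8x8 idct_add for non-ABT
 * blocks, or two simple_idct84/48 adds (top/bottom or left/right halves) for
 * the ABT block sizes, clearing abt_block2 afterwards. */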
static void wmv2_add_block(Wmv2Context *w, DCTELEM *block1, uint8_t *dst, int stride, int n){
    MpegEncContext * const s= &w->s;
    uint8_t temp[2][64];
    int i;

    if(w->abt_type_table[n] && 0){
        int a,b;
        a= block1[0];
        b= w->abt_block2[n][0];
        block1[0]= a+b;
        w->abt_block2[n][0]= a-b;
    }

    switch(w->abt_type_table[n]){
    case 0:
        if (s->block_last_index[n] >= 0) {
            s->idct_add (dst, stride, block1);
        }
        break;
    case 1:
        simple_idct84_add(dst           , stride, block1);
        simple_idct84_add(dst + 4*stride, stride, w->abt_block2[n]);
        memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
        break;
    case 2:
        simple_idct48_add(dst           , stride, block1);
        simple_idct48_add(dst + 4       , stride, w->abt_block2[n]);
        memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
        break;
    default:
        fprintf(stderr, "internal error in WMV2 abt\n");
    }
}

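/* Adds all six blocks of a macroblock (4 luma, 2 chroma) to the current
 * picture; chroma is skipped when decoding gray-only (CODEC_FLAG_GRAY). */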
void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr){
    Wmv2Context * const w= (Wmv2Context*)s;

    wmv2_add_block(w, block1[0], dest_y                    , s->linesize, 0);
    wmv2_add_block(w, block1[1], dest_y + 8                , s->linesize, 1);
    wmv2_add_block(w, block1[2], dest_y     + 8*s->linesize, s->linesize, 2);
    wmv2_add_block(w, block1[3], dest_y + 8 + 8*s->linesize, s->linesize, 3);

    if(s->flags&CODEC_FLAG_GRAY) return;

    wmv2_add_block(w, block1[4], dest_cb, s->uvlinesize, 4);
    wmv2_add_block(w, block1[5], dest_cr, s->uvlinesize, 5);
}

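/* Motion compensation for mspel (WMV2's special sub-pel) macroblocks: luma
 * uses the put_mspel_pixels_tab filters selected by dxy and hshift, with edge
 * emulation when the block reaches outside the picture; chroma falls back to
 * ordinary half-pel interpolation. */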
void ff_mspel_motion(MpegEncContext *s,
                     UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
                     UINT8 **ref_picture, op_pixels_func (*pix_op)[4],
                     int motion_x, int motion_y, int h)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    UINT8 *ptr;
    int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize;
    int emu=0;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    dxy = 2*dxy + w->hshift;
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = s->mb_y * 16 + (motion_y >> 1);

    /* WARNING: do not forget half pels */
    v_edge_pos = s->v_edge_pos;
    src_x = clip(src_x, -16, s->width);
    src_y = clip(src_y, -16, s->height);
    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;
    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(src_x<1 || src_y<1 || src_x + 17  >= s->h_edge_pos
                              || src_y + h+1 >= v_edge_pos){
            ff_emulated_edge_mc(s, ptr - 1 - s->linesize, s->linesize, 19, 19,
                                src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer + 1 + s->linesize;
            emu=1;
        }
    }

    s->dsp.put_mspel_pixels_tab[dxy](dest_y             , ptr             , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8           , ptr+8           , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y  +8*linesize, ptr  +8*linesize, linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize);

    if(s->flags&CODEC_FLAG_GRAY) return;

    if (s->out_format == FMT_H263) {
        dxy = 0;
        if ((motion_x & 3) != 0)
            dxy |= 1;
        if ((motion_y & 3) != 0)
            dxy |= 2;
        mx = motion_x >> 2;
        my = motion_y >> 2;
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;
    }

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = clip(src_y, -8, s->height >> 1);
    if (src_y == (s->height >> 1))
        dxy &= ~2;
    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(emu){
        ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9,
                            src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9,
                            src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
}

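/* Decodes one macroblock: handles skipped MBs, reads the intra/inter CBP
 * VLC, predicts and decodes the motion vector for inter MBs, and finally
 * decodes the six blocks (with ABT handling on the inter path). */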
static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, code, i;
    UINT8 *coded_val;

    if(w->j_type) return 0;

    s->error_status_table[s->mb_x + s->mb_y*s->mb_width]= 0;

    if (s->pict_type == P_TYPE) {
        if(s->mb_type[s->mb_y * s->mb_width + s->mb_x]&MB_TYPE_SKIPED){
            /* skip mb */
            s->mb_intra = 0;
            for(i=0;i<6;i++)
                s->block_last_index[i] = -1;
            s->mv_dir = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
            s->mv[0][0][0] = 0;
            s->mv[0][0][1] = 0;
            s->mb_skiped = 1;
            return 0;
        }

        code = get_vlc2(&s->gb, mb_non_intra_vlc[w->cbp_table_index].table, MB_NON_INTRA_VLC_BITS, 3);
        if (code < 0)
            return -1;
        s->mb_intra = (~code & 0x40) >> 6;

        cbp = code & 0x3f;
    } else {
        s->mb_intra = 1;
        code = get_vlc2(&s->gb, mb_intra_vlc.table, MB_INTRA_VLC_BITS, 2);
        if (code < 0){
            fprintf(stderr, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        /* predict coded block pattern */
        cbp = 0;
        for(i=0;i<6;i++) {
            int val = ((code >> (5 - i)) & 1);
            if (i < 4) {
                int pred = coded_block_pred(s, i, &coded_val);
                val = val ^ pred;
                *coded_val = val;
            }
            cbp |= val << (5 - i);
        }
    }

    if (!s->mb_intra) {
        int mx, my;
//printf("P at %d %d\n", s->mb_x, s->mb_y);
        wmv2_pred_motion(w, &mx, &my);

        if(cbp){
            if(s->per_mb_rl_table){
                s->rl_table_index = decode012(&s->gb);
                s->rl_chroma_table_index = s->rl_table_index;
            }

            if(w->abt_flag && w->per_mb_abt){
                w->per_block_abt= get_bits1(&s->gb);
                if(!w->per_block_abt)
                    w->abt_type= decode012(&s->gb);
            }else
                w->per_block_abt=0;
        }

        if (wmv2_decode_motion(w, &mx, &my) < 0)
            return -1;

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        s->mv[0][0][0] = mx;
        s->mv[0][0][1] = my;

        for (i = 0; i < 6; i++) {
            if (wmv2_decode_inter_block(w, block[i], i, (cbp >> (5 - i)) & 1) < 0)
            {
                fprintf(stderr,"\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    } else {
//if(s->pict_type==P_TYPE)
//   printf("%d%d ", s->inter_intra_pred, cbp);
//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
        s->ac_pred = get_bits1(&s->gb);
        if(s->inter_intra_pred){
            s->h263_aic_dir= get_vlc2(&s->gb, inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1);
//            printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y);
        }
        if(s->per_mb_rl_table && cbp){
            s->rl_table_index = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        for (i = 0; i < 6; i++) {
            if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0)
            {
                fprintf(stderr,"\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    }

    return 0;
}

static int wmv2_decode_init(AVCodecContext *avctx){
    Wmv2Context * const w= avctx->priv_data;

    if(ff_h263_decode_init(avctx) < 0)
        return -1;

    wmv2_common_init(w);

    return 0;
}

AVCodec wmv2_decoder = {
    "wmv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_WMV2,
    sizeof(Wmv2Context),
    wmv2_decode_init,
    NULL,
    ff_h263_decode_end,
    ff_h263_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
};

AVCodec wmv2_encoder = {
    "wmv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_WMV2,
    sizeof(Wmv2Context),
    wmv2_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};