/*
 * Copyright (c) 2002 The FFmpeg Project.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file wmv2.c
 * wmv2 codec.
 */
#include "simple_idct.h"

/* P-frame macroblock skip coding modes (2 bits in the picture header). */
#define SKIP_TYPE_NONE 0   /* no skip information, every MB is coded */
#define SKIP_TYPE_MPEG 1   /* one skip bit per macroblock */
#define SKIP_TYPE_ROW  2   /* per-row flag: whole row skipped or per-MB bits */
#define SKIP_TYPE_COL  3   /* per-column flag: whole column skipped or per-MB bits */

/**
 * WMV2 codec context. The embedded MpegEncContext MUST stay the first
 * member: the code casts between MpegEncContext* and Wmv2Context*.
 */
typedef struct Wmv2Context{
    MpegEncContext s;
    int j_type_bit;          /* extradata flag: J-type pictures may occur */
    int j_type;              /* current picture is a J-type picture (unsupported) */
    int flag3;               /* extradata bit; semantics unclear — kept verbatim */
    int flag63;
    int abt_flag;            /* extradata flag: ABT (alternative block transform) allowed */
    int abt_type;            /* current ABT transform: 0=8x8, 1=8x4, 2=4x8 (see wmv2_add_block) */
    int abt_type_table[6];   /* per-block ABT type of the current macroblock */
    int per_mb_abt;          /* ABT type signalled per macroblock instead of per frame */
    int per_block_abt;       /* ABT type signalled per block within the current MB */
    int mspel_bit;           /* extradata flag: mspel motion compensation allowed */
    int cbp_table_index;     /* which of the 3 inter CBP VLC tables is active */
    int top_left_mv_flag;    /* extradata flag: enables explicit A/B MV predictor selection */
    int per_mb_rl_bit;       /* extradata flag: RL table may be switched per MB */
    int skip_type;           /* one of SKIP_TYPE_* for the current P frame */
    int hshift;              /* extra half-pel shift bit folded into the mspel dxy index */
    ScanTable abt_scantable[2];  /* scantables for the 8x4 / 4x8 ABT transforms */

    /* second sub-block coefficients for split (8x4/4x8) ABT transforms */
    DECLARE_ALIGNED_8(DCTELEM, abt_block2[6][64]);
}Wmv2Context;
/**
 * Initialize state shared by the WMV2 encoder and decoder:
 * builds the IDCT-permutated scantables for the two ABT transforms.
 */
static void wmv2_common_init(Wmv2Context * w){
    MpegEncContext * const s= &w->s;

    ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[0], wmv2_scantableA);
    ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[1], wmv2_scantableB);
}
#ifdef CONFIG_ENCODERS

/**
 * Write the WMV2 extradata header and configure the encoder's feature
 * flags to match what was written. The bit layout mirrors
 * decode_ext_header(); the write order must not change.
 *
 * @return 0 (cannot fail)
 */
static int encode_ext_header(Wmv2Context *w){
    MpegEncContext * const s= &w->s;
    PutBitContext pb;
    int code;

    init_put_bits(&pb, s->avctx->extradata, s->avctx->extradata_size);

    /* 5-bit integer frame rate, truncated */
    put_bits(&pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29
    /* 11-bit bitrate in kbit/s, clamped to the field's maximum */
    put_bits(&pb, 11, FFMIN(s->bit_rate/1024, 2047));

    /* feature flags — the assignments double as the values written */
    put_bits(&pb, 1, w->mspel_bit=1);
    put_bits(&pb, 1, w->flag3=1);
    put_bits(&pb, 1, w->abt_flag=1);
    put_bits(&pb, 1, w->j_type_bit=1);
    put_bits(&pb, 1, w->top_left_mv_flag=0);
    put_bits(&pb, 1, w->per_mb_rl_bit=1);
    /* 3-bit slice code; 1 means one slice per picture */
    put_bits(&pb, 3, code=1);

    flush_put_bits(&pb);

    s->slice_height = s->mb_height / code;

    return 0;
}
  74. static int wmv2_encode_init(AVCodecContext *avctx){
  75. Wmv2Context * const w= avctx->priv_data;
  76. if(MPV_encode_init(avctx) < 0)
  77. return -1;
  78. wmv2_common_init(w);
  79. avctx->extradata_size= 4;
  80. avctx->extradata= av_mallocz(avctx->extradata_size + 10);
  81. encode_ext_header(w);
  82. return 0;
  83. }
#if 0 /* unused, remove? */
/* NOTE(review): dead code, disabled by #if 0 — presumably superseded by the
 * generic encoder cleanup path; confirm before deleting outright. */
static int wmv2_encode_end(AVCodecContext *avctx){
    if(MPV_encode_end(avctx) < 0)
        return -1;

    avctx->extradata_size= 0;
    av_freep(&avctx->extradata);

    return 0;
}
#endif
/**
 * Write the WMV2 picture header. The bit layout mirrors
 * ff_wmv2_decode_picture_header()/ff_wmv2_decode_secondary_picture_header();
 * the write order is part of the bitstream format and must not change.
 *
 * @param picture_number unused
 * @return 0 (cannot fail)
 */
int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)
{
    Wmv2Context * const w= (Wmv2Context*)s;

    /* 1 bit: picture type (I_TYPE=1 -> 0, P_TYPE=2 -> 1) */
    put_bits(&s->pb, 1, s->pict_type - 1);
    if(s->pict_type == I_TYPE){
        put_bits(&s->pb, 7, 0);
    }
    put_bits(&s->pb, 5, s->qscale);

    /* fixed choices: this encoder does not use the optional features */
    s->dc_table_index = 1;
    s->mv_table_index = 1; /* only if P frame */
//    s->use_skip_mb_code = 1; /* only if P frame */
    s->per_mb_rl_table = 0;
    s->mspel= 0;
    w->per_mb_abt=0;
    w->abt_type=0;
    w->j_type=0;

    assert(s->flipflop_rounding);

    if (s->pict_type == I_TYPE) {
        assert(s->no_rounding==1);
        if(w->j_type_bit) put_bits(&s->pb, 1, w->j_type);

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            code012(&s->pb, s->rl_chroma_table_index);
            code012(&s->pb, s->rl_table_index);
        }

        put_bits(&s->pb, 1, s->dc_table_index);

        s->inter_intra_pred= 0;
    }else{
        int cbp_index;

        /* no MB-skip signalling is emitted by this encoder */
        put_bits(&s->pb, 2, SKIP_TYPE_NONE);

        code012(&s->pb, cbp_index=0);
        /* the CBP VLC table actually used depends on both the written
         * cbp_index and the quantizer range — same mapping as the decoder */
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) put_bits(&s->pb, 1, s->mspel);

        if(w->abt_flag){
            put_bits(&s->pb, 1, w->per_mb_abt^1);
            if(!w->per_mb_abt){
                code012(&s->pb, w->abt_type);
            }
        }

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            code012(&s->pb, s->rl_table_index);
            s->rl_chroma_table_index = s->rl_table_index;
        }
        put_bits(&s->pb, 1, s->dc_table_index);
        put_bits(&s->pb, 1, s->mv_table_index);

        s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    return 0;
}
// nearly identical to wmv1, but duplicated here because we don't use the
// unused MS-specific features; kept separate in case someone wants to add
// support for them later
/**
 * Encode one WMV2 macroblock: CBP (+ MV for inter MBs, coded-block
 * prediction + AC-pred bit for intra MBs) followed by the six blocks.
 *
 * @param block    the six 8x8 coefficient blocks (4 luma, 2 chroma)
 * @param motion_x / motion_y  motion vector of the MB (inter only)
 */
void ff_wmv2_encode_mb(MpegEncContext * s,
                       DCTELEM block[6][64],
                       int motion_x, int motion_y)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, coded_cbp, i;
    int pred_x, pred_y;
    uint8_t *coded_block;

    handle_slices(s);

    if (!s->mb_intra) {
        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }

        /* cbp + 64 selects the "intra MB" half of the combined VLC table */
        put_bits(&s->pb,
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][1],
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][0]);

        /* motion vector, coded as a difference from the predictor */
        h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
        msmpeg4_encode_motion(s, motion_x - pred_x,
                              motion_y - pred_y);
    } else {
        /* compute cbp */
        cbp = 0;
        coded_cbp = 0;
        for (i = 0; i < 6; i++) {
            int val, pred;
            val = (s->block_last_index[i] >= 1);
            cbp |= val << (5 - i);
            if (i < 4) {
                /* predict value for close blocks only for luma */
                pred = coded_block_pred(s, i, &coded_block);
                *coded_block = val;
                /* only the prediction residual is transmitted */
                val = val ^ pred;
            }
            coded_cbp |= val << (5 - i);
        }
#if 0
        if (coded_cbp)
            printf("cbp=%x %x\n", cbp, coded_cbp);
#endif

        if (s->pict_type == I_TYPE) {
            put_bits(&s->pb,
                     ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
        } else {
            put_bits(&s->pb,
                     wmv2_inter_table[w->cbp_table_index][cbp][1],
                     wmv2_inter_table[w->cbp_table_index][cbp][0]);
        }
        put_bits(&s->pb, 1, 0);         /* no AC prediction yet */
        if(s->inter_intra_pred){
            s->h263_aic_dir=0;
            put_bits(&s->pb, table_inter_intra[s->h263_aic_dir][1], table_inter_intra[s->h263_aic_dir][0]);
        }
    }

    for (i = 0; i < 6; i++) {
        msmpeg4_encode_block(s, block[i], i);
    }
}
#endif //CONFIG_ENCODERS
/**
 * Parse the per-picture macroblock skip information of a P frame and
 * store the resulting MB_TYPE_* flags into the current picture's mb_type
 * array. The 2-bit skip mode selects how skip bits are laid out; the bit
 * read order below is part of the bitstream format.
 */
static void parse_mb_skip(Wmv2Context * w){
    int mb_x, mb_y;
    MpegEncContext * const s= &w->s;
    uint32_t * const mb_type= s->current_picture_ptr->mb_type;

    w->skip_type= get_bits(&s->gb, 2);
    switch(w->skip_type){
    case SKIP_TYPE_NONE:
        /* no skip bits: every MB is a coded 16x16 forward-predicted MB */
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_16x16 | MB_TYPE_L0;
            }
        }
        break;
    case SKIP_TYPE_MPEG:
        /* one skip bit per macroblock, raster order */
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
            }
        }
        break;
    case SKIP_TYPE_ROW:
        /* per row: flag=1 -> whole row skipped, flag=0 -> per-MB bits follow */
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            if(get_bits1(&s->gb)){
                for(mb_x=0; mb_x<s->mb_width; mb_x++){
                    mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }else{
                for(mb_x=0; mb_x<s->mb_width; mb_x++){
                    mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }
        }
        break;
    case SKIP_TYPE_COL:
        /* per column: same scheme as SKIP_TYPE_ROW, column-major */
        for(mb_x=0; mb_x<s->mb_width; mb_x++){
            if(get_bits1(&s->gb)){
                for(mb_y=0; mb_y<s->mb_height; mb_y++){
                    mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }else{
                for(mb_y=0; mb_y<s->mb_height; mb_y++){
                    mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
                }
            }
        }
        break;
    }
}
/**
 * Parse the WMV2 extradata header (the counterpart of encode_ext_header())
 * and set the feature flags governing the rest of the stream.
 *
 * @return 0 on success, -1 on missing extradata or an invalid slice code
 */
static int decode_ext_header(Wmv2Context *w){
    MpegEncContext * const s= &w->s;
    GetBitContext gb;
    int fps;
    int code;

    if(s->avctx->extradata_size<4) return -1;

    init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8);

    fps                = get_bits(&gb, 5);     /* integer frame rate, informational only */
    s->bit_rate        = get_bits(&gb, 11)*1024;
    w->mspel_bit       = get_bits1(&gb);
    w->flag3           = get_bits1(&gb);
    w->abt_flag        = get_bits1(&gb);
    w->j_type_bit      = get_bits1(&gb);
    w->top_left_mv_flag= get_bits1(&gb);
    w->per_mb_rl_bit   = get_bits1(&gb);
    code               = get_bits(&gb, 3);     /* number of slices per picture */

    if(code==0) return -1;                     /* avoid division by zero below */

    s->slice_height = s->mb_height / code;

    if(s->avctx->debug&FF_DEBUG_PICT_INFO){
        av_log(s->avctx, AV_LOG_DEBUG, "fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, flag3:%d, slices:%d\n",
        fps, s->bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit, w->top_left_mv_flag, w->per_mb_rl_bit, code, w->flag3,
        code);
    }
    return 0;
}
  291. int ff_wmv2_decode_picture_header(MpegEncContext * s)
  292. {
  293. Wmv2Context * const w= (Wmv2Context*)s;
  294. int code;
  295. #if 0
  296. {
  297. int i;
  298. for(i=0; i<s->gb.size*8; i++)
  299. printf("%d", get_bits1(&s->gb));
  300. // get_bits1(&s->gb);
  301. printf("END\n");
  302. return -1;
  303. }
  304. #endif
  305. if(s->picture_number==0)
  306. decode_ext_header(w);
  307. s->pict_type = get_bits(&s->gb, 1) + 1;
  308. if(s->pict_type == I_TYPE){
  309. code = get_bits(&s->gb, 7);
  310. av_log(s->avctx, AV_LOG_DEBUG, "I7:%X/\n", code);
  311. }
  312. s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
  313. if(s->qscale < 0)
  314. return -1;
  315. return 0;
  316. }
/**
 * Decode the second part of the WMV2 picture header: per-picture coding
 * tables and feature selections (the counterpart of the table-writing part
 * of ff_wmv2_encode_picture_header()). Bit read order is format-defined.
 *
 * @return 0 on success, -1 for unsupported J-type pictures
 */
int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s)
{
    Wmv2Context * const w= (Wmv2Context*)s;

    if (s->pict_type == I_TYPE) {
        if(w->j_type_bit) w->j_type= get_bits1(&s->gb);
        else              w->j_type= 0; //FIXME check

        if(!w->j_type){
            if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
            else                 s->per_mb_rl_table= 0;

            if(!s->per_mb_rl_table){
                s->rl_chroma_table_index = decode012(&s->gb);
                s->rl_table_index        = decode012(&s->gb);
            }

            s->dc_table_index = get_bits1(&s->gb);
        }
        s->inter_intra_pred= 0;
        s->no_rounding = 1;

        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n",
                s->qscale,
                s->rl_chroma_table_index,
                s->rl_table_index,
                s->dc_table_index,
                s->per_mb_rl_table,
                w->j_type);
        }
    }else{
        int cbp_index;
        w->j_type=0;

        parse_mb_skip(w);
        cbp_index= decode012(&s->gb);
        /* the CBP VLC table depends on both cbp_index and the quantizer
         * range — same mapping as the encoder */
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) s->mspel= get_bits1(&s->gb);
        else             s->mspel= 0; //FIXME check

        if(w->abt_flag){
            /* the stream codes "frame-level abt", we store the inverse */
            w->per_mb_abt= get_bits1(&s->gb)^1;
            if(!w->per_mb_abt){
                w->abt_type= decode012(&s->gb);
            }
        }

        if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
        else                 s->per_mb_rl_table= 0;

        if(!s->per_mb_rl_table){
            s->rl_table_index        = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        s->dc_table_index = get_bits1(&s->gb);
        s->mv_table_index = get_bits1(&s->gb);

        s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
        s->no_rounding ^= 1;

        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n",
                s->rl_table_index,
                s->rl_chroma_table_index,
                s->dc_table_index,
                s->mv_table_index,
                s->per_mb_rl_table,
                s->qscale,
                s->mspel,
                w->per_mb_abt,
                w->abt_type,
                w->cbp_table_index,
                s->inter_intra_pred);
        }
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    s->picture_number++; //FIXME ?

//    if(w->j_type)
//        return wmv2_decode_j_picture(w); //FIXME
    if(w->j_type){
        av_log(s->avctx, AV_LOG_ERROR, "J-type picture is not supported\n");
        return -1;
    }

    return 0;
}
  402. static inline int wmv2_decode_motion(Wmv2Context *w, int *mx_ptr, int *my_ptr){
  403. MpegEncContext * const s= &w->s;
  404. int ret;
  405. ret= msmpeg4_decode_motion(s, mx_ptr, my_ptr);
  406. if(ret<0) return -1;
  407. if((((*mx_ptr)|(*my_ptr)) & 1) && s->mspel)
  408. w->hshift= get_bits1(&s->gb);
  409. else
  410. w->hshift= 0;
  411. //printf("%d %d ", *mx_ptr, *my_ptr);
  412. return 0;
  413. }
/**
 * Predict the motion vector of the current macroblock from its left (A),
 * top (B) and top-right (C) neighbours. When the A/B predictors disagree
 * strongly (and top_left_mv_flag allows it), one explicit selection bit
 * is read from the bitstream.
 *
 * @param px / py  receives the predicted MV components
 * @return pointer to the current MB's motion_val slot (to be filled by caller)
 */
static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
    MpegEncContext * const s= &w->s;
    int xy, wrap, diff, type;
    int16_t *A, *B, *C, *mot_val;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    mot_val = s->current_picture.motion_val[0][xy];

    A = s->current_picture.motion_val[0][xy - 1];        /* left neighbour */
    B = s->current_picture.motion_val[0][xy - wrap];     /* top neighbour */
    C = s->current_picture.motion_val[0][xy + 2 - wrap]; /* top-right neighbour */

    /* explicit predictor selection is only possible away from the left
     * edge / first slice line, with mspel off and the extradata flag set */
    if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
        diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
    else
        diff=0;

    if(diff >= 8)
        type= get_bits1(&s->gb);  /* 0 -> use A, 1 -> use B */
    else
        type= 2;                  /* implicit: median predictor */

    if(type == 0){
        *px= A[0];
        *py= A[1];
    }else if(type == 1){
        *px= B[0];
        *py= B[1];
    }else{
        /* special case for first (slice) line */
        if (s->first_slice_line) {
            *px = A[0];
            *py = A[1];
        } else {
            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    }

    return mot_val;
}
/**
 * Decode one inter-coded block. For ABT-transformed blocks the block is
 * split in two sub-blocks; the first half goes into @p block, the second
 * into w->abt_block2[n] (added separately in wmv2_add_block()).
 *
 * @param n    block index (0..5)
 * @param cbp  coded bit for this block; 0 means no coefficients
 * @return 0 on success, -1 on a decode error
 */
static inline int wmv2_decode_inter_block(Wmv2Context *w, DCTELEM *block, int n, int cbp){
    MpegEncContext * const s= &w->s;
    static const int sub_cbp_table[3]= {2,3,1};
    int sub_cbp;

    if(!cbp){
        s->block_last_index[n] = -1;
        return 0;
    }

    if(w->per_block_abt)
        w->abt_type= decode012(&s->gb);
#if 0
    if(w->per_block_abt)
        printf("B%d", w->abt_type);
#endif
    w->abt_type_table[n]= w->abt_type;

    if(w->abt_type){
//        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].permutated;
        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].scantable;
//        const uint8_t *scantable= w->abt_type-1 ? w->abt_scantable[1].permutated : w->abt_scantable[0].scantable;

        /* 012-code maps onto a 2-bit sub-block CBP: which halves are coded */
        sub_cbp= sub_cbp_table[ decode012(&s->gb) ];
//        printf("S%d", sub_cbp);

        if(sub_cbp&1){
            if (msmpeg4_decode_block(s, block, n, 1, scantable) < 0)
                return -1;
        }

        if(sub_cbp&2){
            if (msmpeg4_decode_block(s, w->abt_block2[n], n, 1, scantable) < 0)
                return -1;
        }
        /* force the full block through the IDCT path in wmv2_add_block() */
        s->block_last_index[n] = 63;

        return 0;
    }else{
        return msmpeg4_decode_block(s, block, n, 1, s->inter_scantable.permutated);
    }
}
  485. static void wmv2_add_block(Wmv2Context *w, DCTELEM *block1, uint8_t *dst, int stride, int n){
  486. MpegEncContext * const s= &w->s;
  487. if (s->block_last_index[n] >= 0) {
  488. switch(w->abt_type_table[n]){
  489. case 0:
  490. s->dsp.idct_add (dst, stride, block1);
  491. break;
  492. case 1:
  493. simple_idct84_add(dst , stride, block1);
  494. simple_idct84_add(dst + 4*stride, stride, w->abt_block2[n]);
  495. memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
  496. break;
  497. case 2:
  498. simple_idct48_add(dst , stride, block1);
  499. simple_idct48_add(dst + 4 , stride, w->abt_block2[n]);
  500. memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
  501. break;
  502. default:
  503. av_log(s->avctx, AV_LOG_ERROR, "internal error in WMV2 abt\n");
  504. }
  505. }
  506. }
  507. void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr){
  508. Wmv2Context * const w= (Wmv2Context*)s;
  509. wmv2_add_block(w, block1[0], dest_y , s->linesize, 0);
  510. wmv2_add_block(w, block1[1], dest_y + 8 , s->linesize, 1);
  511. wmv2_add_block(w, block1[2], dest_y + 8*s->linesize, s->linesize, 2);
  512. wmv2_add_block(w, block1[3], dest_y + 8 + 8*s->linesize, s->linesize, 3);
  513. if(s->flags&CODEC_FLAG_GRAY) return;
  514. wmv2_add_block(w, block1[4], dest_cb , s->uvlinesize, 4);
  515. wmv2_add_block(w, block1[5], dest_cr , s->uvlinesize, 5);
  516. }
/**
 * Motion compensation for mspel macroblocks: luma is interpolated with the
 * WMV2-specific mspel filters (selected by dxy, which folds in the extra
 * hshift bit), chroma with the regular half-pel pix_op. Handles edge
 * emulation for vectors pointing outside the picture.
 *
 * @param ref_picture  [0]=Y, [1]=Cb, [2]=Cr reference planes
 * @param h            macroblock height in luma pixels
 */
void ff_mspel_motion(MpegEncContext *s,
                     uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                     uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                     int motion_x, int motion_y, int h)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    uint8_t *ptr;
    int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize;
    int emu=0;

    /* luma filter index: half-pel phases plus the mspel hshift bit */
    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    dxy = 2*dxy + w->hshift;
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = s->mb_y * 16 + (motion_y >> 1);

    /* WARNING: do no forget half pels */
    v_edge_pos = s->v_edge_pos;
    src_x = av_clip(src_x, -16, s->width);
    src_y = av_clip(src_y, -16, s->height);

    /* at the clip limits, drop the sub-pel phase in that direction */
    if(src_x<=-16 || src_x >= s->width)
        dxy &= ~3;
    if(src_y<=-16 || src_y >= s->height)
        dxy &= ~4;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;
    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(src_x<1 || src_y<1 || src_x + 17 >= s->h_edge_pos
                              || src_y + h+1 >= v_edge_pos){
            /* fetch a 19x19 area with a 1-pixel margin for the mspel filter */
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19,
                             src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer + 1 + s->linesize;
            emu=1;
        }
    }

    /* the four 8x8 luma quadrants */
    s->dsp.put_mspel_pixels_tab[dxy](dest_y             , ptr             , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8           , ptr+8           , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y  +8*linesize, ptr  +8*linesize, linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize);

    if(s->flags&CODEC_FLAG_GRAY) return;

    /* derive the chroma vector from the luma vector */
    if (s->out_format == FMT_H263) {
        dxy = 0;
        if ((motion_x & 3) != 0)
            dxy |= 1;
        if ((motion_y & 3) != 0)
            dxy |= 2;
        mx = motion_x >> 2;
        my = motion_y >> 2;
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;
    }

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == (s->height >> 1))
        dxy &= ~2;
    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                         src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                         src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
}
/**
 * Decode one WMV2 macroblock: skip handling, CBP/mode VLC, per-MB table
 * switching, motion decode and the six blocks. Bit read order mirrors
 * ff_wmv2_encode_mb().
 *
 * @return 0 on success, -1 on a decode error
 */
static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, code, i;
    uint8_t *coded_val;

    if(w->j_type) return 0;  /* J-type pictures carry no MB layer */

    if (s->pict_type == P_TYPE) {
        if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
            /* skip mb */
            s->mb_intra = 0;
            for(i=0;i<6;i++)
                s->block_last_index[i] = -1;
            s->mv_dir = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
            s->mv[0][0][0] = 0;
            s->mv[0][0][1] = 0;
            s->mb_skipped = 1;
            w->hshift=0;
            return 0;
        }

        code = get_vlc2(&s->gb, mb_non_intra_vlc[w->cbp_table_index].table, MB_NON_INTRA_VLC_BITS, 3);
        if (code < 0)
            return -1;
        /* bit 6 of the combined code distinguishes inter (set) from intra */
        s->mb_intra = (~code & 0x40) >> 6;

        cbp = code & 0x3f;
    } else {
        s->mb_intra = 1;
        code = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
        if (code < 0){
            av_log(s->avctx, AV_LOG_ERROR, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        /* predict coded block pattern */
        cbp = 0;
        for(i=0;i<6;i++) {
            int val = ((code >> (5 - i)) & 1);
            if (i < 4) {
                /* the transmitted luma bits are residuals vs the prediction */
                int pred = coded_block_pred(s, i, &coded_val);
                val = val ^ pred;
                *coded_val = val;
            }
            cbp |= val << (5 - i);
        }
    }

    if (!s->mb_intra) {
        int mx, my;
//printf("P at %d %d\n", s->mb_x, s->mb_y);
        wmv2_pred_motion(w, &mx, &my);

        if(cbp){
            s->dsp.clear_blocks(s->block[0]);
            /* per-MB table switches only appear when some block is coded */
            if(s->per_mb_rl_table){
                s->rl_table_index = decode012(&s->gb);
                s->rl_chroma_table_index = s->rl_table_index;
            }

            if(w->abt_flag && w->per_mb_abt){
                w->per_block_abt= get_bits1(&s->gb);
                if(!w->per_block_abt)
                    w->abt_type= decode012(&s->gb);
            }else
                w->per_block_abt=0;
        }

        if (wmv2_decode_motion(w, &mx, &my) < 0)
            return -1;

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        s->mv[0][0][0] = mx;
        s->mv[0][0][1] = my;

        for (i = 0; i < 6; i++) {
            if (wmv2_decode_inter_block(w, block[i], i, (cbp >> (5 - i)) & 1) < 0)
            {
                av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    } else {
//if(s->pict_type==P_TYPE)
//   printf("%d%d ", s->inter_intra_pred, cbp);
//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
        s->ac_pred = get_bits1(&s->gb);
        if(s->inter_intra_pred){
            s->h263_aic_dir= get_vlc2(&s->gb, inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1);
//            printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y);
        }
        if(s->per_mb_rl_table && cbp){
            s->rl_table_index = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        s->dsp.clear_blocks(s->block[0]);
        for (i = 0; i < 6; i++) {
            if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0)
            {
                av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    }

    return 0;
}
  692. static int wmv2_decode_init(AVCodecContext *avctx){
  693. Wmv2Context * const w= avctx->priv_data;
  694. if(ff_h263_decode_init(avctx) < 0)
  695. return -1;
  696. wmv2_common_init(w);
  697. return 0;
  698. }
/* WMV2 decoder registration. Fields are positional; by parallel with the
 * encoder struct below, the NULL slot is the encode callback. */
AVCodec wmv2_decoder = {
    "wmv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_WMV2,
    sizeof(Wmv2Context),
    wmv2_decode_init,
    NULL,
    ff_h263_decode_end,
    ff_h263_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
};
#ifdef CONFIG_ENCODERS
/* WMV2 encoder registration: WMV2-specific init, generic MPEG-video
 * encode/close callbacks; only 4:2:0 input is supported. */
AVCodec wmv2_encoder = {
    "wmv2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_WMV2,
    sizeof(Wmv2Context),
    wmv2_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif