You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

872 lines
25KB

  1. /*
  2. * Copyright (c) 2002 The FFmpeg Project.
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file wmv2.c
  22. * wmv2 codec.
  23. */
  24. #include "avcodec.h"
  25. #include "dsputil.h"
  26. #include "mpegvideo.h"
  27. #include "msmpeg4.h"
  28. #include "msmpeg4data.h"
  29. #include "simple_idct.h"
  30. #include "intrax8.h"
  31. #define SKIP_TYPE_NONE 0
  32. #define SKIP_TYPE_MPEG 1
  33. #define SKIP_TYPE_ROW 2
  34. #define SKIP_TYPE_COL 3
/**
 * WMV2 codec private context.
 * Must begin with MpegEncContext so it can be cast from MpegEncContext*
 * (see the ff_wmv2_* functions below, which all do that cast).
 */
typedef struct Wmv2Context{
    MpegEncContext s;           ///< base encoder/decoder context (must be first member)
    IntraX8Context x8;          ///< IntraX8 state, used when a picture is a j-frame
    int j_type_bit;             ///< extradata flag: j (X8 intra) pictures may occur
    int j_type;                 ///< current picture is a j (X8 intra) picture
    int abt_flag;               ///< extradata flag: adaptive block transform allowed
    int abt_type;               ///< current ABT mode; 0=8x8, 1=two 8x4, 2=two 4x8 (see wmv2_add_block)
    int abt_type_table[6];      ///< ABT mode used for each of the 6 blocks of the current MB
    int per_mb_abt;             ///< ABT mode is signalled per macroblock
    int per_block_abt;          ///< ABT mode is signalled per block
    int mspel_bit;              ///< extradata flag: mspel motion compensation allowed
    int cbp_table_index;        ///< selects the wmv2_inter_table used for CBP (depends on qscale)
    int top_left_mv_flag;       ///< extradata flag: explicit top/left MV predictor choice
    int per_mb_rl_bit;          ///< extradata flag: RL table may be signalled per macroblock
    int skip_type;              ///< one of SKIP_TYPE_{NONE,MPEG,ROW,COL}, set by parse_mb_skip()
    int hshift;                 ///< extra horizontal filter-shift bit for mspel MC
    ScanTable abt_scantable[2]; ///< scantables for the two split ABT transforms
    DECLARE_ALIGNED_8(DCTELEM, abt_block2[6][64]); ///< second-half coefficients of split ABT blocks
}Wmv2Context;
  54. static void wmv2_common_init(Wmv2Context * w){
  55. MpegEncContext * const s= &w->s;
  56. ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[0], wmv2_scantableA);
  57. ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[1], wmv2_scantableB);
  58. }
  59. #ifdef CONFIG_WMV2_ENCODER
  60. static int encode_ext_header(Wmv2Context *w){
  61. MpegEncContext * const s= &w->s;
  62. PutBitContext pb;
  63. int code;
  64. init_put_bits(&pb, s->avctx->extradata, s->avctx->extradata_size);
  65. put_bits(&pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29
  66. put_bits(&pb, 11, FFMIN(s->bit_rate/1024, 2047));
  67. put_bits(&pb, 1, w->mspel_bit=1);
  68. put_bits(&pb, 1, s->loop_filter);
  69. put_bits(&pb, 1, w->abt_flag=1);
  70. put_bits(&pb, 1, w->j_type_bit=1);
  71. put_bits(&pb, 1, w->top_left_mv_flag=0);
  72. put_bits(&pb, 1, w->per_mb_rl_bit=1);
  73. put_bits(&pb, 3, code=1);
  74. flush_put_bits(&pb);
  75. s->slice_height = s->mb_height / code;
  76. return 0;
  77. }
  78. static int wmv2_encode_init(AVCodecContext *avctx){
  79. Wmv2Context * const w= avctx->priv_data;
  80. if(MPV_encode_init(avctx) < 0)
  81. return -1;
  82. wmv2_common_init(w);
  83. avctx->extradata_size= 4;
  84. avctx->extradata= av_mallocz(avctx->extradata_size + 10);
  85. encode_ext_header(w);
  86. return 0;
  87. }
#if 0 /* unused, remove? */
/* Encoder teardown: frees the extradata allocated in wmv2_encode_init()
 * and shuts down the MPEG encoder core. Currently compiled out. */
static int wmv2_encode_end(AVCodecContext *avctx){
    if(MPV_encode_end(avctx) < 0)
        return -1;

    avctx->extradata_size= 0;
    av_freep(&avctx->extradata);

    return 0;
}
#endif
/* Write the WMV2 picture header and select the entropy tables that will
 * be used for the rest of the picture. Always returns 0.
 * NOTE: the exact order of put_bits() calls is the bitstream contract and
 * must mirror ff_wmv2_decode_{picture,secondary_picture}_header(). */
int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)
{
    Wmv2Context * const w= (Wmv2Context*)s;

    put_bits(&s->pb, 1, s->pict_type - 1);   /* 0 = I picture, 1 = P picture */
    if(s->pict_type == I_TYPE){
        put_bits(&s->pb, 7, 0);              /* 7 bits the decoder only logs */
    }
    put_bits(&s->pb, 5, s->qscale);

    /* fixed tool choices of this encoder for the current picture */
    s->dc_table_index = 1;
    s->mv_table_index = 1; /* only if P frame */
//    s->use_skip_mb_code = 1; /* only if P frame */
    s->per_mb_rl_table = 0;
    s->mspel= 0;
    w->per_mb_abt=0;
    w->abt_type=0;
    w->j_type=0;

    assert(s->flipflop_rounding);

    if (s->pict_type == I_TYPE) {
        assert(s->no_rounding==1);
        if(w->j_type_bit) put_bits(&s->pb, 1, w->j_type);

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            ff_msmpeg4_code012(&s->pb, s->rl_chroma_table_index);
            ff_msmpeg4_code012(&s->pb, s->rl_table_index);
        }

        put_bits(&s->pb, 1, s->dc_table_index);

        s->inter_intra_pred= 0;
    }else{
        int cbp_index;

        put_bits(&s->pb, 2, SKIP_TYPE_NONE); /* this encoder never uses MB skip coding */

        ff_msmpeg4_code012(&s->pb, cbp_index=0);
        /* the real CBP table depends on the quantizer range; this mapping
         * must match ff_wmv2_decode_secondary_picture_header() */
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) put_bits(&s->pb, 1, s->mspel);

        if(w->abt_flag){
            put_bits(&s->pb, 1, w->per_mb_abt^1); /* bit is inverted in the stream */
            if(!w->per_mb_abt){
                ff_msmpeg4_code012(&s->pb, w->abt_type);
            }
        }

        if(w->per_mb_rl_bit) put_bits(&s->pb, 1, s->per_mb_rl_table);

        if(!s->per_mb_rl_table){
            ff_msmpeg4_code012(&s->pb, s->rl_table_index);
            s->rl_chroma_table_index = s->rl_table_index;
        }
        put_bits(&s->pb, 1, s->dc_table_index);
        put_bits(&s->pb, 1, s->mv_table_index);

        s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;

    return 0;
}
/* Nearly identical to wmv1 but that is just because we do not use the
 * useless M$ crap features. It is duplicated here in case someone wants
 * to add support for these crap features. */
/* Encode one macroblock: CBP + motion vector for inter MBs, or
 * (predicted) CBP + AC prediction flag for intra MBs, followed by the
 * six 8x8 blocks. Bit order is the bitstream contract. */
void ff_wmv2_encode_mb(MpegEncContext * s,
                       DCTELEM block[6][64],
                       int motion_x, int motion_y)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, coded_cbp, i;
    int pred_x, pred_y;
    uint8_t *coded_block;

    ff_msmpeg4_handle_slices(s);

    if (!s->mb_intra) {
        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }

        /* +64: inter codes live in the upper half of the table */
        put_bits(&s->pb,
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][1],
                 wmv2_inter_table[w->cbp_table_index][cbp + 64][0]);

        /* motion vector, coded as difference to the H.263 predictor */
        h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
        ff_msmpeg4_encode_motion(s, motion_x - pred_x,
                                 motion_y - pred_y);
    } else {
        /* compute cbp */
        cbp = 0;
        coded_cbp = 0;
        for (i = 0; i < 6; i++) {
            int val, pred;
            val = (s->block_last_index[i] >= 1);
            cbp |= val << (5 - i);
            if (i < 4) {
                /* predict value for close blocks only for luma */
                pred = ff_msmpeg4_coded_block_pred(s, i, &coded_block);
                *coded_block = val;
                val = val ^ pred; /* only the prediction error is coded */
            }
            coded_cbp |= val << (5 - i);
        }
#if 0
        if (coded_cbp)
            printf("cbp=%x %x\n", cbp, coded_cbp);
#endif

        if (s->pict_type == I_TYPE) {
            put_bits(&s->pb,
                     ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
        } else {
            put_bits(&s->pb,
                     wmv2_inter_table[w->cbp_table_index][cbp][1],
                     wmv2_inter_table[w->cbp_table_index][cbp][0]);
        }
        put_bits(&s->pb, 1, 0);         /* no AC prediction yet */
        if(s->inter_intra_pred){
            s->h263_aic_dir=0;
            put_bits(&s->pb, table_inter_intra[s->h263_aic_dir][1], table_inter_intra[s->h263_aic_dir][0]);
        }
    }

    for (i = 0; i < 6; i++) {
        ff_msmpeg4_encode_block(s, block[i], i);
    }
}
  222. #endif //CONFIG_WMV2_ENCODER
  223. static void parse_mb_skip(Wmv2Context * w){
  224. int mb_x, mb_y;
  225. MpegEncContext * const s= &w->s;
  226. uint32_t * const mb_type= s->current_picture_ptr->mb_type;
  227. w->skip_type= get_bits(&s->gb, 2);
  228. switch(w->skip_type){
  229. case SKIP_TYPE_NONE:
  230. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  231. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  232. mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_16x16 | MB_TYPE_L0;
  233. }
  234. }
  235. break;
  236. case SKIP_TYPE_MPEG:
  237. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  238. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  239. mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
  240. }
  241. }
  242. break;
  243. case SKIP_TYPE_ROW:
  244. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  245. if(get_bits1(&s->gb)){
  246. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  247. mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
  248. }
  249. }else{
  250. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  251. mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
  252. }
  253. }
  254. }
  255. break;
  256. case SKIP_TYPE_COL:
  257. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  258. if(get_bits1(&s->gb)){
  259. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  260. mb_type[mb_y*s->mb_stride + mb_x]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
  261. }
  262. }else{
  263. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  264. mb_type[mb_y*s->mb_stride + mb_x]= (get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
  265. }
  266. }
  267. }
  268. break;
  269. }
  270. }
/* Parse the 4-byte sequence-level extradata header: nominal fps and
 * bitrate, the coding tool flags and the slice count code.
 * Returns 0 on success, -1 if the extradata is too small or invalid. */
static int decode_ext_header(Wmv2Context *w){
    MpegEncContext * const s= &w->s;
    GetBitContext gb;
    int fps;
    int code;

    if(s->avctx->extradata_size<4) return -1;

    init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8);

    fps                = get_bits(&gb, 5);   /* only logged, not otherwise used here */
    s->bit_rate        = get_bits(&gb, 11)*1024;
    w->mspel_bit       = get_bits1(&gb);
    s->loop_filter     = get_bits1(&gb);
    w->abt_flag        = get_bits1(&gb);
    w->j_type_bit      = get_bits1(&gb);
    w->top_left_mv_flag= get_bits1(&gb);
    w->per_mb_rl_bit   = get_bits1(&gb);
    code               = get_bits(&gb, 3);   /* slice count code */

    if(code==0) return -1;  /* would divide by zero below */

    s->slice_height = s->mb_height / code;

    if(s->avctx->debug&FF_DEBUG_PICT_INFO){
        av_log(s->avctx, AV_LOG_DEBUG, "fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, loop_filter:%d, slices:%d\n",
        fps, s->bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit, w->top_left_mv_flag, w->per_mb_rl_bit, code, s->loop_filter,
        code);
    }
    return 0;
}
  296. int ff_wmv2_decode_picture_header(MpegEncContext * s)
  297. {
  298. Wmv2Context * const w= (Wmv2Context*)s;
  299. int code;
  300. #if 0
  301. {
  302. int i;
  303. for(i=0; i<s->gb.size*8; i++)
  304. printf("%d", get_bits1(&s->gb));
  305. // get_bits1(&s->gb);
  306. printf("END\n");
  307. return -1;
  308. }
  309. #endif
  310. if(s->picture_number==0)
  311. decode_ext_header(w);
  312. s->pict_type = get_bits1(&s->gb) + 1;
  313. if(s->pict_type == I_TYPE){
  314. code = get_bits(&s->gb, 7);
  315. av_log(s->avctx, AV_LOG_DEBUG, "I7:%X/\n", code);
  316. }
  317. s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
  318. if(s->qscale < 0)
  319. return -1;
  320. return 0;
  321. }
/* Parse the remaining (secondary) picture header: per-picture coding
 * tool selections and entropy table indices. If the picture is a j (X8
 * intra) frame it is decoded entirely here and 1 is returned; otherwise
 * returns 0 and macroblock decoding follows.
 * Bit read order mirrors ff_wmv2_encode_picture_header(). */
int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s)
{
    Wmv2Context * const w= (Wmv2Context*)s;

    if (s->pict_type == I_TYPE) {
        /* j frames use the separate IntraX8 coder instead of the MB loop */
        if(w->j_type_bit) w->j_type= get_bits1(&s->gb);
        else              w->j_type= 0; //FIXME check

        if(!w->j_type){
            if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
            else                 s->per_mb_rl_table= 0;

            if(!s->per_mb_rl_table){
                s->rl_chroma_table_index = decode012(&s->gb);
                s->rl_table_index = decode012(&s->gb);
            }

            s->dc_table_index = get_bits1(&s->gb);
        }
        s->inter_intra_pred= 0;
        s->no_rounding = 1;

        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n",
                s->qscale,
                s->rl_chroma_table_index,
                s->rl_table_index,
                s->dc_table_index,
                s->per_mb_rl_table,
                w->j_type);
        }
    }else{
        int cbp_index;
        w->j_type=0;

        parse_mb_skip(w);
        cbp_index= decode012(&s->gb);
        /* quantizer-dependent mapping from the coded index to the actual
         * CBP table; must match ff_wmv2_encode_picture_header() */
        if(s->qscale <= 10){
            int map[3]= {0,2,1};
            w->cbp_table_index= map[cbp_index];
        }else if(s->qscale <= 20){
            int map[3]= {1,0,2};
            w->cbp_table_index= map[cbp_index];
        }else{
            int map[3]= {2,1,0};
            w->cbp_table_index= map[cbp_index];
        }

        if(w->mspel_bit) s->mspel= get_bits1(&s->gb);
        else             s->mspel= 0; //FIXME check

        if(w->abt_flag){
            w->per_mb_abt= get_bits1(&s->gb)^1;  /* bit is inverted in the stream */
            if(!w->per_mb_abt){
                w->abt_type= decode012(&s->gb);
            }
        }

        if(w->per_mb_rl_bit) s->per_mb_rl_table= get_bits1(&s->gb);
        else                 s->per_mb_rl_table= 0;

        if(!s->per_mb_rl_table){
            s->rl_table_index = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        s->dc_table_index = get_bits1(&s->gb);
        s->mv_table_index = get_bits1(&s->gb);

        s->inter_intra_pred= 0;//(s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE);
        s->no_rounding ^= 1;  /* flip-flop rounding between P frames */

        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n",
                s->rl_table_index,
                s->rl_chroma_table_index,
                s->dc_table_index,
                s->mv_table_index,
                s->per_mb_rl_table,
                s->qscale,
                s->mspel,
                w->per_mb_abt,
                w->abt_type,
                w->cbp_table_index,
                s->inter_intra_pred);
        }
    }
    s->esc3_level_length= 0;
    s->esc3_run_length= 0;
    s->picture_number++; //FIXME ?

    if(w->j_type){
        /* decode the whole X8 picture right away */
        ff_intrax8_decode_picture(&w->x8, 2*s->qscale, (s->qscale-1)|1 );
        return 1;
    }

    return 0;
}
  405. static inline int wmv2_decode_motion(Wmv2Context *w, int *mx_ptr, int *my_ptr){
  406. MpegEncContext * const s= &w->s;
  407. int ret;
  408. ret= ff_msmpeg4_decode_motion(s, mx_ptr, my_ptr);
  409. if(ret<0) return -1;
  410. if((((*mx_ptr)|(*my_ptr)) & 1) && s->mspel)
  411. w->hshift= get_bits1(&s->gb);
  412. else
  413. w->hshift= 0;
  414. //printf("%d %d ", *mx_ptr, *my_ptr);
  415. return 0;
  416. }
  417. static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
  418. MpegEncContext * const s= &w->s;
  419. int xy, wrap, diff, type;
  420. int16_t *A, *B, *C, *mot_val;
  421. wrap = s->b8_stride;
  422. xy = s->block_index[0];
  423. mot_val = s->current_picture.motion_val[0][xy];
  424. A = s->current_picture.motion_val[0][xy - 1];
  425. B = s->current_picture.motion_val[0][xy - wrap];
  426. C = s->current_picture.motion_val[0][xy + 2 - wrap];
  427. if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
  428. diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
  429. else
  430. diff=0;
  431. if(diff >= 8)
  432. type= get_bits1(&s->gb);
  433. else
  434. type= 2;
  435. if(type == 0){
  436. *px= A[0];
  437. *py= A[1];
  438. }else if(type == 1){
  439. *px= B[0];
  440. *py= B[1];
  441. }else{
  442. /* special case for first (slice) line */
  443. if (s->first_slice_line) {
  444. *px = A[0];
  445. *py = A[1];
  446. } else {
  447. *px = mid_pred(A[0], B[0], C[0]);
  448. *py = mid_pred(A[1], B[1], C[1]);
  449. }
  450. }
  451. return mot_val;
  452. }
/* Decode the coefficients of one inter-coded block, honoring the
 * adaptive block transform: a split transform codes the block as two
 * halves (the second half goes into w->abt_block2[n]).
 * Returns 0 on success, -1 on a bitstream error. */
static inline int wmv2_decode_inter_block(Wmv2Context *w, DCTELEM *block, int n, int cbp){
    MpegEncContext * const s= &w->s;
    static const int sub_cbp_table[3]= {2,3,1};
    int sub_cbp;

    if(!cbp){
        s->block_last_index[n] = -1;  /* block entirely uncoded */
        return 0;
    }

    if(w->per_block_abt)
        w->abt_type= decode012(&s->gb);
#if 0
    if(w->per_block_abt)
        printf("B%d", w->abt_type);
#endif
    w->abt_type_table[n]= w->abt_type;  /* remembered for wmv2_add_block() */

    if(w->abt_type){
//        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].permutated;
        const uint8_t *scantable= w->abt_scantable[w->abt_type-1].scantable;
//        const uint8_t *scantable= w->abt_type-1 ? w->abt_scantable[1].permutated : w->abt_scantable[0].scantable;

        /* which of the two transform halves carry coefficients */
        sub_cbp= sub_cbp_table[ decode012(&s->gb) ];
//        printf("S%d", sub_cbp);

        if(sub_cbp&1){
            if (ff_msmpeg4_decode_block(s, block, n, 1, scantable) < 0)
                return -1;
        }

        if(sub_cbp&2){
            if (ff_msmpeg4_decode_block(s, w->abt_block2[n], n, 1, scantable) < 0)
                return -1;
        }
        s->block_last_index[n] = 63;

        return 0;
    }else{
        return ff_msmpeg4_decode_block(s, block, n, 1, s->inter_scantable.permutated);
    }
}
  488. static void wmv2_add_block(Wmv2Context *w, DCTELEM *block1, uint8_t *dst, int stride, int n){
  489. MpegEncContext * const s= &w->s;
  490. if (s->block_last_index[n] >= 0) {
  491. switch(w->abt_type_table[n]){
  492. case 0:
  493. s->dsp.idct_add (dst, stride, block1);
  494. break;
  495. case 1:
  496. simple_idct84_add(dst , stride, block1);
  497. simple_idct84_add(dst + 4*stride, stride, w->abt_block2[n]);
  498. memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
  499. break;
  500. case 2:
  501. simple_idct48_add(dst , stride, block1);
  502. simple_idct48_add(dst + 4 , stride, w->abt_block2[n]);
  503. memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM));
  504. break;
  505. default:
  506. av_log(s->avctx, AV_LOG_ERROR, "internal error in WMV2 abt\n");
  507. }
  508. }
  509. }
  510. void ff_wmv2_add_mb(MpegEncContext *s, DCTELEM block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr){
  511. Wmv2Context * const w= (Wmv2Context*)s;
  512. wmv2_add_block(w, block1[0], dest_y , s->linesize, 0);
  513. wmv2_add_block(w, block1[1], dest_y + 8 , s->linesize, 1);
  514. wmv2_add_block(w, block1[2], dest_y + 8*s->linesize, s->linesize, 2);
  515. wmv2_add_block(w, block1[3], dest_y + 8 + 8*s->linesize, s->linesize, 3);
  516. if(s->flags&CODEC_FLAG_GRAY) return;
  517. wmv2_add_block(w, block1[4], dest_cb , s->uvlinesize, 4);
  518. wmv2_add_block(w, block1[5], dest_cr , s->uvlinesize, 5);
  519. }
/* Motion compensation with the WMV2-specific "mspel" half-pel filters.
 * Luma uses put_mspel_pixels_tab (filter index from the half-pel bits
 * plus w->hshift), chroma uses the regular half-pel pix_op. */
void ff_mspel_motion(MpegEncContext *s,
                     uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                     uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                     int motion_x, int motion_y, int h)
{
    Wmv2Context * const w= (Wmv2Context*)s;
    uint8_t *ptr;
    int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize;
    int emu=0;

    /* luma filter index: two half-pel bits plus the extra hshift bit */
    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    dxy = 2*dxy + w->hshift;
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = s->mb_y * 16 + (motion_y >> 1);

    /* WARNING: do no forget half pels */
    v_edge_pos = s->v_edge_pos;
    src_x = av_clip(src_x, -16, s->width);
    src_y = av_clip(src_y, -16, s->height);

    if(src_x<=-16 || src_x >= s->width)
        dxy &= ~3;  /* fully outside horizontally: drop interpolation */
    if(src_y<=-16 || src_y >= s->height)
        dxy &= ~4;  /* fully outside vertically: drop interpolation */

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;
    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        /* the filters read one pixel left/up and two right/down of the
         * 16x16 block, hence the 19x19 emulated area at offset (-1,-1) */
        if(src_x<1 || src_y<1 || src_x + 17  >= s->h_edge_pos
                              || src_y + h+1 >= v_edge_pos){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19,
                             src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer + 1 + s->linesize;
            emu=1;
        }
    }

    /* 16x16 luma block as four filtered 8x8 copies */
    s->dsp.put_mspel_pixels_tab[dxy](dest_y             , ptr             , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8           , ptr+8           , linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y  +8*linesize, ptr  +8*linesize, linesize);
    s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize);

    if(s->flags&CODEC_FLAG_GRAY) return;

    /* derive the half-resolution chroma vector and its half-pel bits */
    if (s->out_format == FMT_H263) {
        dxy = 0;
        if ((motion_x & 3) != 0)
            dxy |= 1;
        if ((motion_y & 3) != 0)
            dxy |= 2;
        mx = motion_x >> 2;
        my = motion_y >> 2;
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;
    }

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == (s->height >> 1))
        dxy &= ~2;
    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                         src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                         src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
}
/* Decode one macroblock (MB header plus up to 6 blocks).
 * For j (X8) pictures everything was already decoded in the secondary
 * picture header, so this is a no-op returning 0.
 * Returns 0 on success, -1 on a bitstream error. */
int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
    Wmv2Context * const w= (Wmv2Context*)s;
    int cbp, code, i;
    uint8_t *coded_val;

    if(w->j_type) return 0;

    if (s->pict_type == P_TYPE) {
        if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
            /* skip mb: flagged in advance by parse_mb_skip() */
            s->mb_intra = 0;
            for(i=0;i<6;i++)
                s->block_last_index[i] = -1;
            s->mv_dir = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
            s->mv[0][0][0] = 0;
            s->mv[0][0][1] = 0;
            s->mb_skipped = 1;
            w->hshift=0;
            return 0;
        }

        /* joint code: low 6 bits = cbp, bit 6 set = inter (clear = intra) */
        code = get_vlc2(&s->gb, ff_mb_non_intra_vlc[w->cbp_table_index].table, MB_NON_INTRA_VLC_BITS, 3);
        if (code < 0)
            return -1;
        s->mb_intra = (~code & 0x40) >> 6;

        cbp = code & 0x3f;
    } else {
        s->mb_intra = 1;
        code = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
        if (code < 0){
            av_log(s->avctx, AV_LOG_ERROR, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        /* predict coded block pattern */
        cbp = 0;
        for(i=0;i<6;i++) {
            int val = ((code >> (5 - i)) & 1);
            if (i < 4) {
                /* luma cbp bits are coded as prediction error */
                int pred = ff_msmpeg4_coded_block_pred(s, i, &coded_val);
                val = val ^ pred;
                *coded_val = val;
            }
            cbp |= val << (5 - i);
        }
    }

    if (!s->mb_intra) {
        int mx, my;
//printf("P at %d %d\n", s->mb_x, s->mb_y);
        wmv2_pred_motion(w, &mx, &my);

        if(cbp){
            s->dsp.clear_blocks(s->block[0]);
            /* per-MB table selections are only present when coefficients follow */
            if(s->per_mb_rl_table){
                s->rl_table_index = decode012(&s->gb);
                s->rl_chroma_table_index = s->rl_table_index;
            }

            if(w->abt_flag && w->per_mb_abt){
                w->per_block_abt= get_bits1(&s->gb);
                if(!w->per_block_abt)
                    w->abt_type= decode012(&s->gb);
            }else
                w->per_block_abt=0;
        }

        if (wmv2_decode_motion(w, &mx, &my) < 0)
            return -1;

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        s->mv[0][0][0] = mx;
        s->mv[0][0][1] = my;

        for (i = 0; i < 6; i++) {
            if (wmv2_decode_inter_block(w, block[i], i, (cbp >> (5 - i)) & 1) < 0)
            {
                av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    } else {
//if(s->pict_type==P_TYPE)
//   printf("%d%d ", s->inter_intra_pred, cbp);
//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
        s->ac_pred = get_bits1(&s->gb);
        if(s->inter_intra_pred){
            s->h263_aic_dir= get_vlc2(&s->gb, ff_inter_intra_vlc.table, INTER_INTRA_VLC_BITS, 1);
//            printf("%d%d %d %d/", s->ac_pred, s->h263_aic_dir, s->mb_x, s->mb_y);
        }
        if(s->per_mb_rl_table && cbp){
            s->rl_table_index = decode012(&s->gb);
            s->rl_chroma_table_index = s->rl_table_index;
        }

        s->dsp.clear_blocks(s->block[0]);
        for (i = 0; i < 6; i++) {
            if (ff_msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0)
            {
                av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i);
                return -1;
            }
        }
    }

    return 0;
}
  695. static int wmv2_decode_init(AVCodecContext *avctx){
  696. Wmv2Context * const w= avctx->priv_data;
  697. if(avctx->idct_algo==FF_IDCT_AUTO){
  698. avctx->idct_algo=FF_IDCT_WMV2;
  699. }
  700. if(ff_h263_decode_init(avctx) < 0)
  701. return -1;
  702. wmv2_common_init(w);
  703. ff_intrax8_common_init(&w->x8,&w->s);
  704. return 0;
  705. }
  706. #ifdef CONFIG_WMV2_DECODER
  707. AVCodec wmv2_decoder = {
  708. "wmv2",
  709. CODEC_TYPE_VIDEO,
  710. CODEC_ID_WMV2,
  711. sizeof(Wmv2Context),
  712. wmv2_decode_init,
  713. NULL,
  714. ff_h263_decode_end,
  715. ff_h263_decode_frame,
  716. CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
  717. };
  718. #endif
  719. #ifdef CONFIG_WMV2_ENCODER
  720. AVCodec wmv2_encoder = {
  721. "wmv2",
  722. CODEC_TYPE_VIDEO,
  723. CODEC_ID_WMV2,
  724. sizeof(Wmv2Context),
  725. wmv2_encode_init,
  726. MPV_encode_picture,
  727. MPV_encode_end,
  728. .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
  729. };
  730. #endif