  1. /*
  2. * VC-1 and WMV3 decoder
  3. * Copyright (c) 2011 Mashiat Sarker Shakkhar
  4. * Copyright (c) 2006-2007 Konstantin Shishkov
  5. * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * VC-1 and WMV3 block decoding routines
  26. */
  27. #include "avcodec.h"
  28. #include "mpegutils.h"
  29. #include "mpegvideo.h"
  30. #include "msmpeg4data.h"
  31. #include "unary.h"
  32. #include "vc1.h"
  33. #include "vc1_pred.h"
  34. #include "vc1acdata.h"
  35. #include "vc1data.h"
  36. #define MB_INTRA_VLC_BITS 9
  37. #define DC_VLC_BITS 9
  38. // offset tables for interlaced picture MVDATA decoding
  39. static const uint8_t offset_table[2][9] = {
  40. { 0, 1, 2, 4, 8, 16, 32, 64, 128 },
  41. { 0, 1, 3, 7, 15, 31, 63, 127, 255 },
  42. };
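/* Row selection: get_mvdata_interlaced() below indexes these tables with the
 * corresponding DMVRANGE extension bit (extend_x/extend_y), while the
 * GET_MVDATA macro always uses row 1. */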
  43. /***********************************************************************/
  44. /**
  45. * @name VC-1 Bitplane decoding
  46. * @see 8.7, p56
  47. * @{
  48. */
  49. static inline void init_block_index(VC1Context *v)
  50. {
  51. MpegEncContext *s = &v->s;
  52. ff_init_block_index(s);
  53. if (v->field_mode && !(v->second_field ^ v->tff)) {
  54. s->dest[0] += s->current_picture_ptr->f->linesize[0];
  55. s->dest[1] += s->current_picture_ptr->f->linesize[1];
  56. s->dest[2] += s->current_picture_ptr->f->linesize[2];
  57. }
  58. }
  59. /** @} */ //Bitplane group
  60. static void vc1_put_signed_blocks_clamped(VC1Context *v)
  61. {
  62. MpegEncContext *s = &v->s;
  63. int topleft_mb_pos, top_mb_pos;
  64. int stride_y, fieldtx = 0;
  65. int v_dist;
  66. /* The put pixels loop is always one MB row behind the decoding loop,
  67. * because we can only put pixels when overlap filtering is done, and
  68. * for filtering of the bottom edge of a MB, we need the next MB row
  69. * present as well.
  70. * Within the row, the put pixels loop is also one MB col behind the
  71. * decoding loop, again because filtering of the right MB edge
  72. * requires the next MB to be present. */
  73. if (!s->first_slice_line) {
  74. if (s->mb_x) {
  75. topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
  76. if (v->fcm == ILACE_FRAME)
  77. fieldtx = v->fieldtx_plane[topleft_mb_pos];
  78. stride_y = s->linesize << fieldtx;
  79. v_dist = (16 - fieldtx) >> (fieldtx == 0);
  80. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
  81. s->dest[0] - 16 * s->linesize - 16,
  82. stride_y);
  83. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
  84. s->dest[0] - 16 * s->linesize - 8,
  85. stride_y);
  86. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
  87. s->dest[0] - v_dist * s->linesize - 16,
  88. stride_y);
  89. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
  90. s->dest[0] - v_dist * s->linesize - 8,
  91. stride_y);
  92. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  93. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
  94. s->dest[1] - 8 * s->uvlinesize - 8,
  95. s->uvlinesize);
  96. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
  97. s->dest[2] - 8 * s->uvlinesize - 8,
  98. s->uvlinesize);
  99. }
  100. }
  101. if (s->mb_x == s->mb_width - 1) {
  102. top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
  103. if (v->fcm == ILACE_FRAME)
  104. fieldtx = v->fieldtx_plane[top_mb_pos];
  105. stride_y = s->linesize << fieldtx;
  106. v_dist = fieldtx ? 15 : 8;
  107. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
  108. s->dest[0] - 16 * s->linesize,
  109. stride_y);
  110. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
  111. s->dest[0] - 16 * s->linesize + 8,
  112. stride_y);
  113. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
  114. s->dest[0] - v_dist * s->linesize,
  115. stride_y);
  116. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
  117. s->dest[0] - v_dist * s->linesize + 8,
  118. stride_y);
  119. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  120. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
  121. s->dest[1] - 8 * s->uvlinesize,
  122. s->uvlinesize);
  123. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
  124. s->dest[2] - 8 * s->uvlinesize,
  125. s->uvlinesize);
  126. }
  127. }
  128. }
  129. #define inc_blk_idx(idx) do { \
  130. idx++; \
  131. if (idx >= v->n_allocated_blks) \
  132. idx = 0; \
  133. } while (0)
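/* Advance the ring-buffer indices over the v->n_allocated_blks coefficient
 * buffers so that the current, left, top and top-left MBs' blocks stay
 * available for the deferred put-pixels pass above. */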
  134. inc_blk_idx(v->topleft_blk_idx);
  135. inc_blk_idx(v->top_blk_idx);
  136. inc_blk_idx(v->left_blk_idx);
  137. inc_blk_idx(v->cur_blk_idx);
  138. }
  139. /***********************************************************************/
  140. /**
  141. * @name VC-1 Block-level functions
  142. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  143. * @{
  144. */
  145. /**
  146. * @def GET_MQUANT
  147. * @brief Get macroblock-level quantizer scale
  148. */
  149. #define GET_MQUANT() \
  150. if (v->dquantfrm) { \
  151. int edges = 0; \
  152. if (v->dqprofile == DQPROFILE_ALL_MBS) { \
  153. if (v->dqbilevel) { \
  154. mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
  155. } else { \
  156. mqdiff = get_bits(gb, 3); \
  157. if (mqdiff != 7) \
  158. mquant = v->pq + mqdiff; \
  159. else \
  160. mquant = get_bits(gb, 5); \
  161. } \
  162. } \
  163. if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
  164. edges = 1 << v->dqsbedge; \
  165. else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
  166. edges = (3 << v->dqsbedge) % 15; \
  167. else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
  168. edges = 15; \
  169. if ((edges&1) && !s->mb_x) \
  170. mquant = v->altpq; \
  171. if ((edges&2) && s->first_slice_line) \
  172. mquant = v->altpq; \
  173. if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
  174. mquant = v->altpq; \
  175. if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
  176. mquant = v->altpq; \
  177. if (!mquant || mquant > 31) { \
  178. av_log(v->s.avctx, AV_LOG_ERROR, \
  179. "Overriding invalid mquant %d\n", mquant); \
  180. mquant = 1; \
  181. } \
  182. }
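/*
 * Call-site sketch (mirroring vc1_decode_p_mb below): the macro expects the
 * locals mquant and mqdiff plus gb, s and v in scope, and only overrides the
 * picture-level quantizer when DQUANT is active:
 *
 *     mquant = v->pq;  // lossy initialization
 *     GET_MQUANT();
 *     s->current_picture.qscale_table[mb_pos] = mquant;
 */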
  183. /**
  184. * @def GET_MVDATA(_dmv_x, _dmv_y)
  185. * @brief Get MV differentials
  186. * @see MVDATA decoding from 8.3.5.2, p(1)20
  187. * @param _dmv_x Horizontal differential for decoded MV
  188. * @param _dmv_y Vertical differential for decoded MV
  189. */
  190. #define GET_MVDATA(_dmv_x, _dmv_y) \
  191. index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
  192. VC1_MV_DIFF_VLC_BITS, 2); \
  193. if (index > 36) { \
  194. mb_has_coeffs = 1; \
  195. index -= 37; \
  196. } else \
  197. mb_has_coeffs = 0; \
  198. s->mb_intra = 0; \
  199. if (!index) { \
  200. _dmv_x = _dmv_y = 0; \
  201. } else if (index == 35) { \
  202. _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
  203. _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
  204. } else if (index == 36) { \
  205. _dmv_x = 0; \
  206. _dmv_y = 0; \
  207. s->mb_intra = 1; \
  208. } else { \
  209. index1 = index % 6; \
  210. _dmv_x = offset_table[1][index1]; \
  211. val = size_table[index1] - (!s->quarter_sample && index1 == 5); \
  212. if (val > 0) { \
  213. val = get_bits(gb, val); \
  214. sign = 0 - (val & 1); \
  215. _dmv_x = (sign ^ ((val >> 1) + _dmv_x)) - sign; \
  216. } \
  217. \
  218. index1 = index / 6; \
  219. _dmv_y = offset_table[1][index1]; \
  220. val = size_table[index1] - (!s->quarter_sample && index1 == 5); \
  221. if (val > 0) { \
  222. val = get_bits(gb, val); \
  223. sign = 0 - (val & 1); \
  224. _dmv_y = (sign ^ ((val >> 1) + _dmv_y)) - sign; \
  225. } \
  226. }
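/*
 * Call-site sketch (as in vc1_decode_p_mb below): requires the locals index,
 * index1, val, sign and mb_has_coeffs; it also sets s->mb_intra, and the
 * decoded differentials are normally fed straight into MV prediction:
 *
 *     GET_MVDATA(dmv_x, dmv_y);
 *     ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y,
 *                    v->mb_type[0], 0, 0);
 */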
  227. static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
  228. int *dmv_y, int *pred_flag)
  229. {
  230. int index, index1;
  231. int extend_x, extend_y;
  232. GetBitContext *gb = &v->s.gb;
  233. int bits, esc;
  234. int val, sign;
  235. if (v->numref) {
  236. bits = VC1_2REF_MVDATA_VLC_BITS;
  237. esc = 125;
  238. } else {
  239. bits = VC1_1REF_MVDATA_VLC_BITS;
  240. esc = 71;
  241. }
  242. extend_x = v->dmvrange & 1;
  243. extend_y = (v->dmvrange >> 1) & 1;
  244. index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
  245. if (index == esc) {
  246. *dmv_x = get_bits(gb, v->k_x);
  247. *dmv_y = get_bits(gb, v->k_y);
  248. if (v->numref) {
  249. if (pred_flag)
  250. *pred_flag = *dmv_y & 1;
  251. *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
  252. }
  253. }
  254. else {
  255. av_assert0(index < esc);
  256. index1 = (index + 1) % 9;
  257. if (index1 != 0) {
  258. val = get_bits(gb, index1 + extend_x);
  259. sign = 0 - (val & 1);
  260. *dmv_x = (sign ^ ((val >> 1) + offset_table[extend_x][index1])) - sign;
  261. } else
  262. *dmv_x = 0;
  263. index1 = (index + 1) / 9;
  264. if (index1 > v->numref) {
  265. val = get_bits(gb, (index1 >> v->numref) + extend_y);
  266. sign = 0 - (val & 1);
  267. *dmv_y = (sign ^ ((val >> 1) + offset_table[extend_y][index1 >> v->numref])) - sign;
  268. } else
  269. *dmv_y = 0;
  270. if (v->numref && pred_flag)
  271. *pred_flag = index1 & 1;
  272. }
  273. }
  274. /** Reconstruct motion vector for B-frame and do motion compensation
  275. */
  276. static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
  277. int direct, int mode)
  278. {
  279. if (direct) {
  280. ff_vc1_mc_1mv(v, 0);
  281. ff_vc1_interp_mc(v);
  282. return;
  283. }
  284. if (mode == BMV_TYPE_INTERPOLATED) {
  285. ff_vc1_mc_1mv(v, 0);
  286. ff_vc1_interp_mc(v);
  287. return;
  288. }
  289. ff_vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
  290. }
  291. /** Get predicted DC value for I-frames only
  292. * prediction dir: left=1, top=0
  293. * @param s MpegEncContext
  294. * @param overlap flag indicating that overlap filtering is used
  295. * @param pq integer part of picture quantizer
  296. * @param[in] n block index in the current MB
  297. * @param dc_val_ptr Pointer to DC predictor
  298. * @param dir_ptr Prediction direction for use in AC prediction
  299. */
  300. static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  301. int16_t **dc_val_ptr, int *dir_ptr)
  302. {
  303. int a, b, c, wrap, pred, scale;
  304. int16_t *dc_val;
  305. static const uint16_t dcpred[32] = {
  306. -1, 1024, 512, 341, 256, 205, 171, 146, 128,
  307. 114, 102, 93, 85, 79, 73, 68, 64,
  308. 60, 57, 54, 51, 49, 47, 45, 43,
  309. 41, 39, 38, 37, 35, 34, 33
  310. };
  311. /* find prediction - wmv3_dc_scale always used here in fact */
  312. if (n < 4) scale = s->y_dc_scale;
  313. else scale = s->c_dc_scale;
  314. wrap = s->block_wrap[n];
  315. dc_val = s->dc_val[0] + s->block_index[n];
  316. /* B A
  317. * C X
  318. */
  319. c = dc_val[ - 1];
  320. b = dc_val[ - 1 - wrap];
  321. a = dc_val[ - wrap];
  322. if (pq < 9 || !overlap) {
  323. /* Set outer values */
  324. if (s->first_slice_line && (n != 2 && n != 3))
  325. b = a = dcpred[scale];
  326. if (s->mb_x == 0 && (n != 1 && n != 3))
  327. b = c = dcpred[scale];
  328. } else {
  329. /* Set outer values */
  330. if (s->first_slice_line && (n != 2 && n != 3))
  331. b = a = 0;
  332. if (s->mb_x == 0 && (n != 1 && n != 3))
  333. b = c = 0;
  334. }
  335. if (abs(a - b) <= abs(b - c)) {
  336. pred = c;
  337. *dir_ptr = 1; // left
  338. } else {
  339. pred = a;
  340. *dir_ptr = 0; // top
  341. }
  342. /* update predictor */
  343. *dc_val_ptr = &dc_val[0];
  344. return pred;
  345. }
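/* Worked example of the gradient rule above: with top a = 100, top-left
 * b = 90 and left c = 60, |a - b| = 10 <= |b - c| = 30, so the left
 * neighbour c is chosen as predictor and *dir_ptr is set to 1. */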
  346. /** Get predicted DC value
  347. * prediction dir: left=1, top=0
  348. * @param s MpegEncContext
  349. * @param overlap flag indicating that overlap filtering is used
  350. * @param pq integer part of picture quantizer
  351. * @param[in] n block index in the current MB
  352. * @param a_avail flag indicating top block availability
  353. * @param c_avail flag indicating left block availability
  354. * @param dc_val_ptr Pointer to DC predictor
  355. * @param dir_ptr Prediction direction for use in AC prediction
  356. */
  357. static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  358. int a_avail, int c_avail,
  359. int16_t **dc_val_ptr, int *dir_ptr)
  360. {
  361. int a, b, c, wrap, pred;
  362. int16_t *dc_val;
  363. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  364. int q1, q2 = 0;
  365. int dqscale_index;
  366. /* scale predictors if needed */
  367. q1 = s->current_picture.qscale_table[mb_pos];
  368. dqscale_index = s->y_dc_scale_table[q1] - 1;
  369. if (dqscale_index < 0)
  370. return 0;
  371. wrap = s->block_wrap[n];
  372. dc_val = s->dc_val[0] + s->block_index[n];
  373. /* B A
  374. * C X
  375. */
  376. c = dc_val[ - 1];
  377. b = dc_val[ - 1 - wrap];
  378. a = dc_val[ - wrap];
  379. if (c_avail && (n != 1 && n != 3)) {
  380. q2 = s->current_picture.qscale_table[mb_pos - 1];
  381. if (q2 && q2 != q1)
  382. c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  383. }
  384. if (a_avail && (n != 2 && n != 3)) {
  385. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  386. if (q2 && q2 != q1)
  387. a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  388. }
  389. if (a_avail && c_avail && (n != 3)) {
  390. int off = mb_pos;
  391. if (n != 1)
  392. off--;
  393. if (n != 2)
  394. off -= s->mb_stride;
  395. q2 = s->current_picture.qscale_table[off];
  396. if (q2 && q2 != q1)
  397. b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  398. }
  399. if (c_avail && (!a_avail || abs(a - b) <= abs(b - c))) {
  400. pred = c;
  401. *dir_ptr = 1; // left
  402. } else if (a_avail) {
  403. pred = a;
  404. *dir_ptr = 0; // top
  405. } else {
  406. pred = 0;
  407. *dir_ptr = 1; // left
  408. }
  409. /* update predictor */
  410. *dc_val_ptr = &dc_val[0];
  411. return pred;
  412. }
  413. /** @} */ // Block group
  414. /**
  415. * @name VC1 Macroblock-level functions in Simple/Main Profiles
  416. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  417. * @{
  418. */
  419. static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
  420. uint8_t **coded_block_ptr)
  421. {
  422. int xy, wrap, pred, a, b, c;
  423. xy = s->block_index[n];
  424. wrap = s->b8_stride;
  425. /* B C
  426. * A X
  427. */
  428. a = s->coded_block[xy - 1 ];
  429. b = s->coded_block[xy - 1 - wrap];
  430. c = s->coded_block[xy - wrap];
  431. if (b == c) {
  432. pred = a;
  433. } else {
  434. pred = c;
  435. }
  436. /* store value */
  437. *coded_block_ptr = &s->coded_block[xy];
  438. return pred;
  439. }
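/* The rule above is the usual gradient prediction for coded-block flags:
 * when the top-left (b) and top (c) flags agree, the left flag (a) is the
 * predictor, otherwise the top flag (c) is used. */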
  440. /**
  441. * Decode one AC coefficient
  442. * @param v The VC1 context
  443. * @param last Last coefficient
  444. * @param skip How many zero coefficients to skip
  445. * @param value Decoded AC coefficient value
  446. * @param codingset set of VLC to decode data
  447. * @see 8.1.3.4
  448. */
  449. static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
  450. int *value, int codingset)
  451. {
  452. GetBitContext *gb = &v->s.gb;
  453. int index, run, level, lst, sign;
  454. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  455. if (index != ff_vc1_ac_sizes[codingset] - 1) {
  456. run = vc1_index_decode_table[codingset][index][0];
  457. level = vc1_index_decode_table[codingset][index][1];
  458. lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
  459. sign = get_bits1(gb);
  460. } else {
  461. int escape = decode210(gb);
  462. if (escape != 2) {
  463. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  464. run = vc1_index_decode_table[codingset][index][0];
  465. level = vc1_index_decode_table[codingset][index][1];
  466. lst = index >= vc1_last_decode_table[codingset];
  467. if (escape == 0) {
  468. if (lst)
  469. level += vc1_last_delta_level_table[codingset][run];
  470. else
  471. level += vc1_delta_level_table[codingset][run];
  472. } else {
  473. if (lst)
  474. run += vc1_last_delta_run_table[codingset][level] + 1;
  475. else
  476. run += vc1_delta_run_table[codingset][level] + 1;
  477. }
  478. sign = get_bits1(gb);
  479. } else {
  480. lst = get_bits1(gb);
  481. if (v->s.esc3_level_length == 0) {
  482. if (v->pq < 8 || v->dquantfrm) { // table 59
  483. v->s.esc3_level_length = get_bits(gb, 3);
  484. if (!v->s.esc3_level_length)
  485. v->s.esc3_level_length = get_bits(gb, 2) + 8;
  486. } else { // table 60
  487. v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
  488. }
  489. v->s.esc3_run_length = 3 + get_bits(gb, 2);
  490. }
  491. run = get_bits(gb, v->s.esc3_run_length);
  492. sign = get_bits1(gb);
  493. level = get_bits(gb, v->s.esc3_level_length);
  494. }
  495. }
  496. *last = lst;
  497. *skip = run;
  498. *value = (level ^ -sign) + sign;
  499. }
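/* The decoded (run, level) pair is returned through *skip/*value with *last
 * marking the end of the block; the sign is folded in via
 * (level ^ -sign) + sign, which negates level when the sign bit is set. */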
  500. /** Decode intra block in intra frames - should be faster than decode_intra_block
  501. * @param v VC1Context
  502. * @param block block to decode
  503. * @param[in] n subblock index
  504. * @param coded are AC coeffs present or not
  505. * @param codingset set of VLC to decode data
  506. */
  507. static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
  508. int coded, int codingset)
  509. {
  510. GetBitContext *gb = &v->s.gb;
  511. MpegEncContext *s = &v->s;
  512. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  513. int i;
  514. int16_t *dc_val;
  515. int16_t *ac_val, *ac_val2;
  516. int dcdiff, scale;
  517. /* Get DC differential */
  518. if (n < 4) {
  519. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  520. } else {
  521. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  522. }
  523. if (dcdiff < 0) {
  524. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  525. return -1;
  526. }
  527. if (dcdiff) {
  528. const int m = (v->pq == 1 || v->pq == 2) ? 3 - v->pq : 0;
  529. if (dcdiff == 119 /* ESC index value */) {
  530. dcdiff = get_bits(gb, 8 + m);
  531. } else {
  532. if (m)
  533. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  534. }
  535. if (get_bits1(gb))
  536. dcdiff = -dcdiff;
  537. }
  538. /* Prediction */
  539. dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
  540. *dc_val = dcdiff;
  541. /* Store the quantized DC coeff, used for prediction */
  542. if (n < 4)
  543. scale = s->y_dc_scale;
  544. else
  545. scale = s->c_dc_scale;
  546. block[0] = dcdiff * scale;
  547. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  548. ac_val2 = ac_val;
  549. if (dc_pred_dir) // left
  550. ac_val -= 16;
  551. else // top
  552. ac_val -= 16 * s->block_wrap[n];
  553. scale = v->pq * 2 + v->halfpq;
  554. //AC Decoding
  555. i = !!coded;
  556. if (coded) {
  557. int last = 0, skip, value;
  558. const uint8_t *zz_table;
  559. int k;
  560. if (v->s.ac_pred) {
  561. if (!dc_pred_dir)
  562. zz_table = v->zz_8x8[2];
  563. else
  564. zz_table = v->zz_8x8[3];
  565. } else
  566. zz_table = v->zz_8x8[1];
  567. while (!last) {
  568. vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  569. i += skip;
  570. if (i > 63)
  571. break;
  572. block[zz_table[i++]] = value;
  573. }
  574. /* apply AC prediction if needed */
  575. if (s->ac_pred) {
  576. int sh;
  577. if (dc_pred_dir) { // left
  578. sh = v->left_blk_sh;
  579. } else { // top
  580. sh = v->top_blk_sh;
  581. ac_val += 8;
  582. }
  583. for (k = 1; k < 8; k++)
  584. block[k << sh] += ac_val[k];
  585. }
  586. /* save AC coeffs for further prediction */
  587. for (k = 1; k < 8; k++) {
  588. ac_val2[k] = block[k << v->left_blk_sh];
  589. ac_val2[k + 8] = block[k << v->top_blk_sh];
  590. }
  591. /* scale AC coeffs */
  592. for (k = 1; k < 64; k++)
  593. if (block[k]) {
  594. block[k] *= scale;
  595. if (!v->pquantizer)
  596. block[k] += (block[k] < 0) ? -v->pq : v->pq;
  597. }
  598. } else {
  599. int k;
  600. memset(ac_val2, 0, 16 * 2);
  601. /* apply AC prediction if needed */
  602. if (s->ac_pred) {
  603. int sh;
  604. if (dc_pred_dir) { //left
  605. sh = v->left_blk_sh;
  606. } else { // top
  607. sh = v->top_blk_sh;
  608. ac_val += 8;
  609. ac_val2 += 8;
  610. }
  611. memcpy(ac_val2, ac_val, 8 * 2);
  612. for (k = 1; k < 8; k++) {
  613. block[k << sh] = ac_val[k] * scale;
  614. if (!v->pquantizer && block[k << sh])
  615. block[k << sh] += (block[k << sh] < 0) ? -v->pq : v->pq;
  616. }
  617. }
  618. }
  619. if (s->ac_pred) i = 63;
  620. s->block_last_index[n] = i;
  621. return 0;
  622. }
  623. /** Decode intra block in intra frames - should be faster than decode_intra_block
  624. * @param v VC1Context
  625. * @param block block to decode
  626. * @param[in] n subblock number
  627. * @param coded are AC coeffs present or not
  628. * @param codingset set of VLC to decode data
  629. * @param mquant quantizer value for this macroblock
  630. */
  631. static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
  632. int coded, int codingset, int mquant)
  633. {
  634. GetBitContext *gb = &v->s.gb;
  635. MpegEncContext *s = &v->s;
  636. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  637. int i;
  638. int16_t *dc_val = NULL;
  639. int16_t *ac_val, *ac_val2;
  640. int dcdiff;
  641. int a_avail = v->a_avail, c_avail = v->c_avail;
  642. int use_pred = s->ac_pred;
  643. int scale;
  644. int q1, q2 = 0;
  645. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  646. /* Get DC differential */
  647. if (n < 4) {
  648. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  649. } else {
  650. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  651. }
  652. if (dcdiff < 0) {
  653. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  654. return -1;
  655. }
  656. if (dcdiff) {
  657. const int m = (mquant == 1 || mquant == 2) ? 3 - mquant : 0;
  658. if (dcdiff == 119 /* ESC index value */) {
  659. dcdiff = get_bits(gb, 8 + m);
  660. } else {
  661. if (m)
  662. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  663. }
  664. if (get_bits1(gb))
  665. dcdiff = -dcdiff;
  666. }
  667. /* Prediction */
  668. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
  669. *dc_val = dcdiff;
  670. /* Store the quantized DC coeff, used for prediction */
  671. if (n < 4)
  672. scale = s->y_dc_scale;
  673. else
  674. scale = s->c_dc_scale;
  675. block[0] = dcdiff * scale;
  676. /* check if AC is needed at all */
  677. if (!a_avail && !c_avail)
  678. use_pred = 0;
  679. scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
  680. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  681. ac_val2 = ac_val;
  682. if (dc_pred_dir) // left
  683. ac_val -= 16;
  684. else // top
  685. ac_val -= 16 * s->block_wrap[n];
  686. q1 = s->current_picture.qscale_table[mb_pos];
  687. if (n == 3)
  688. q2 = q1;
  689. else if (dc_pred_dir) {
  690. if (n == 1)
  691. q2 = q1;
  692. else if (c_avail && mb_pos)
  693. q2 = s->current_picture.qscale_table[mb_pos - 1];
  694. } else {
  695. if (n == 2)
  696. q2 = q1;
  697. else if (a_avail && mb_pos >= s->mb_stride)
  698. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  699. }
  700. //AC Decoding
  701. i = 1;
  702. if (coded) {
  703. int last = 0, skip, value;
  704. const uint8_t *zz_table;
  705. int k;
  706. if (v->s.ac_pred) {
  707. if (!use_pred && v->fcm == ILACE_FRAME) {
  708. zz_table = v->zzi_8x8;
  709. } else {
  710. if (!dc_pred_dir) // top
  711. zz_table = v->zz_8x8[2];
  712. else // left
  713. zz_table = v->zz_8x8[3];
  714. }
  715. } else {
  716. if (v->fcm != ILACE_FRAME)
  717. zz_table = v->zz_8x8[1];
  718. else
  719. zz_table = v->zzi_8x8;
  720. }
  721. while (!last) {
  722. vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  723. i += skip;
  724. if (i > 63)
  725. break;
  726. block[zz_table[i++]] = value;
  727. }
  728. /* apply AC prediction if needed */
  729. if (use_pred) {
  730. int sh;
  731. if (dc_pred_dir) { // left
  732. sh = v->left_blk_sh;
  733. } else { // top
  734. sh = v->top_blk_sh;
  735. ac_val += 8;
  736. }
  737. /* scale predictors if needed*/
  738. if (q2 && q1 != q2) {
  739. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  740. if (q1 < 1)
  741. return AVERROR_INVALIDDATA;
  742. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  743. for (k = 1; k < 8; k++)
  744. block[k << sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  745. } else {
  746. for (k = 1; k < 8; k++)
  747. block[k << sh] += ac_val[k];
  748. }
  749. }
  750. /* save AC coeffs for further prediction */
  751. for (k = 1; k < 8; k++) {
  752. ac_val2[k ] = block[k << v->left_blk_sh];
  753. ac_val2[k + 8] = block[k << v->top_blk_sh];
  754. }
  755. /* scale AC coeffs */
  756. for (k = 1; k < 64; k++)
  757. if (block[k]) {
  758. block[k] *= scale;
  759. if (!v->pquantizer)
  760. block[k] += (block[k] < 0) ? -mquant : mquant;
  761. }
  762. } else { // no AC coeffs
  763. int k;
  764. memset(ac_val2, 0, 16 * 2);
  765. /* apply AC prediction if needed */
  766. if (use_pred) {
  767. int sh;
  768. if (dc_pred_dir) { // left
  769. sh = v->left_blk_sh;
  770. } else { // top
  771. sh = v->top_blk_sh;
  772. ac_val += 8;
  773. ac_val2 += 8;
  774. }
  775. memcpy(ac_val2, ac_val, 8 * 2);
  776. if (q2 && q1 != q2) {
  777. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  778. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  779. if (q1 < 1)
  780. return AVERROR_INVALIDDATA;
  781. for (k = 1; k < 8; k++)
  782. ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  783. }
  784. for (k = 1; k < 8; k++) {
  785. block[k << sh] = ac_val2[k] * scale;
  786. if (!v->pquantizer && block[k << sh])
  787. block[k << sh] += (block[k << sh] < 0) ? -mquant : mquant;
  788. }
  789. }
  790. }
  791. if (use_pred) i = 63;
  792. s->block_last_index[n] = i;
  793. return 0;
  794. }
  795. /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
  796. * @param v VC1Context
  797. * @param block block to decode
  798. * @param[in] n subblock index
  799. * @param coded are AC coeffs present or not
  800. * @param mquant block quantizer
  801. * @param codingset set of VLC to decode data
  802. */
  803. static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
  804. int coded, int mquant, int codingset)
  805. {
  806. GetBitContext *gb = &v->s.gb;
  807. MpegEncContext *s = &v->s;
  808. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  809. int i;
  810. int16_t *dc_val = NULL;
  811. int16_t *ac_val, *ac_val2;
  812. int dcdiff;
  813. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  814. int a_avail = v->a_avail, c_avail = v->c_avail;
  815. int use_pred = s->ac_pred;
  816. int scale;
  817. int q1, q2 = 0;
  818. s->bdsp.clear_block(block);
  819. /* XXX: Guard against dumb values of mquant */
  820. mquant = av_clip_uintp2(mquant, 5);
  821. /* Set DC scale - y and c use the same */
  822. s->y_dc_scale = s->y_dc_scale_table[mquant];
  823. s->c_dc_scale = s->c_dc_scale_table[mquant];
  824. /* Get DC differential */
  825. if (n < 4) {
  826. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  827. } else {
  828. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  829. }
  830. if (dcdiff < 0) {
  831. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  832. return -1;
  833. }
  834. if (dcdiff) {
  835. const int m = (mquant == 1 || mquant == 2) ? 3 - mquant : 0;
  836. if (dcdiff == 119 /* ESC index value */) {
  837. dcdiff = get_bits(gb, 8 + m);
  838. } else {
  839. if (m)
  840. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  841. }
  842. if (get_bits1(gb))
  843. dcdiff = -dcdiff;
  844. }
  845. /* Prediction */
  846. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
  847. *dc_val = dcdiff;
  848. /* Store the quantized DC coeff, used for prediction */
  849. if (n < 4) {
  850. block[0] = dcdiff * s->y_dc_scale;
  851. } else {
  852. block[0] = dcdiff * s->c_dc_scale;
  853. }
  854. //AC Decoding
  855. i = 1;
  856. /* check if AC is needed at all and adjust direction if needed */
  857. if (!a_avail) dc_pred_dir = 1;
  858. if (!c_avail) dc_pred_dir = 0;
  859. if (!a_avail && !c_avail) use_pred = 0;
  860. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  861. ac_val2 = ac_val;
  862. scale = mquant * 2 + v->halfpq;
  863. if (dc_pred_dir) //left
  864. ac_val -= 16;
  865. else //top
  866. ac_val -= 16 * s->block_wrap[n];
  867. q1 = s->current_picture.qscale_table[mb_pos];
  868. if (dc_pred_dir && c_avail && mb_pos)
  869. q2 = s->current_picture.qscale_table[mb_pos - 1];
  870. if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
  871. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  872. if (dc_pred_dir && n == 1)
  873. q2 = q1;
  874. if (!dc_pred_dir && n == 2)
  875. q2 = q1;
  876. if (n == 3) q2 = q1;
  877. if (coded) {
  878. int last = 0, skip, value;
  879. int k;
  880. while (!last) {
  881. vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  882. i += skip;
  883. if (i > 63)
  884. break;
  885. if (v->fcm == PROGRESSIVE)
  886. block[v->zz_8x8[0][i++]] = value;
  887. else {
  888. if (use_pred && (v->fcm == ILACE_FRAME)) {
  889. if (!dc_pred_dir) // top
  890. block[v->zz_8x8[2][i++]] = value;
  891. else // left
  892. block[v->zz_8x8[3][i++]] = value;
  893. } else {
  894. block[v->zzi_8x8[i++]] = value;
  895. }
  896. }
  897. }
  898. /* apply AC prediction if needed */
  899. if (use_pred) {
  900. /* scale predictors if needed*/
  901. if (q2 && q1 != q2) {
  902. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  903. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  904. if (q1 < 1)
  905. return AVERROR_INVALIDDATA;
  906. if (dc_pred_dir) { // left
  907. for (k = 1; k < 8; k++)
  908. block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  909. } else { //top
  910. for (k = 1; k < 8; k++)
  911. block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  912. }
  913. } else {
  914. if (dc_pred_dir) { // left
  915. for (k = 1; k < 8; k++)
  916. block[k << v->left_blk_sh] += ac_val[k];
  917. } else { // top
  918. for (k = 1; k < 8; k++)
  919. block[k << v->top_blk_sh] += ac_val[k + 8];
  920. }
  921. }
  922. }
  923. /* save AC coeffs for further prediction */
  924. for (k = 1; k < 8; k++) {
  925. ac_val2[k ] = block[k << v->left_blk_sh];
  926. ac_val2[k + 8] = block[k << v->top_blk_sh];
  927. }
  928. /* scale AC coeffs */
  929. for (k = 1; k < 64; k++)
  930. if (block[k]) {
  931. block[k] *= scale;
  932. if (!v->pquantizer)
  933. block[k] += (block[k] < 0) ? -mquant : mquant;
  934. }
  935. if (use_pred) i = 63;
  936. } else { // no AC coeffs
  937. int k;
  938. memset(ac_val2, 0, 16 * 2);
  939. if (dc_pred_dir) { // left
  940. if (use_pred) {
  941. memcpy(ac_val2, ac_val, 8 * 2);
  942. if (q2 && q1 != q2) {
  943. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  944. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  945. if (q1 < 1)
  946. return AVERROR_INVALIDDATA;
  947. for (k = 1; k < 8; k++)
  948. ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  949. }
  950. }
  951. } else { // top
  952. if (use_pred) {
  953. memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
  954. if (q2 && q1 != q2) {
  955. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  956. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  957. if (q1 < 1)
  958. return AVERROR_INVALIDDATA;
  959. for (k = 1; k < 8; k++)
  960. ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  961. }
  962. }
  963. }
  964. /* apply AC prediction if needed */
  965. if (use_pred) {
  966. if (dc_pred_dir) { // left
  967. for (k = 1; k < 8; k++) {
  968. block[k << v->left_blk_sh] = ac_val2[k] * scale;
  969. if (!v->pquantizer && block[k << v->left_blk_sh])
  970. block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
  971. }
  972. } else { // top
  973. for (k = 1; k < 8; k++) {
  974. block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
  975. if (!v->pquantizer && block[k << v->top_blk_sh])
  976. block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
  977. }
  978. }
  979. i = 63;
  980. }
  981. }
  982. s->block_last_index[n] = i;
  983. return 0;
  984. }
  985. /** Decode P block
  986. */
  987. static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
  988. int mquant, int ttmb, int first_block,
  989. uint8_t *dst, int linesize, int skip_block,
  990. int *ttmb_out)
  991. {
  992. MpegEncContext *s = &v->s;
  993. GetBitContext *gb = &s->gb;
  994. int i, j;
  995. int subblkpat = 0;
  996. int scale, off, idx, last, skip, value;
  997. int ttblk = ttmb & 7;
  998. int pat = 0;
  999. s->bdsp.clear_block(block);
  1000. if (ttmb == -1) {
  1001. ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
  1002. }
  1003. if (ttblk == TT_4X4) {
  1004. subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
  1005. }
  1006. if ((ttblk != TT_8X8 && ttblk != TT_4X4)
  1007. && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
  1008. || (!v->res_rtm_flag && !first_block))) {
  1009. subblkpat = decode012(gb);
  1010. if (subblkpat)
  1011. subblkpat ^= 3; // swap decoded pattern bits
  1012. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
  1013. ttblk = TT_8X4;
  1014. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
  1015. ttblk = TT_4X8;
  1016. }
  1017. scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
  1018. // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
  1019. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
  1020. subblkpat = 2 - (ttblk == TT_8X4_TOP);
  1021. ttblk = TT_8X4;
  1022. }
  1023. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
  1024. subblkpat = 2 - (ttblk == TT_4X8_LEFT);
  1025. ttblk = TT_4X8;
  1026. }
  1027. switch (ttblk) {
  1028. case TT_8X8:
  1029. pat = 0xF;
  1030. i = 0;
  1031. last = 0;
  1032. while (!last) {
  1033. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1034. i += skip;
  1035. if (i > 63)
  1036. break;
  1037. if (!v->fcm)
  1038. idx = v->zz_8x8[0][i++];
  1039. else
  1040. idx = v->zzi_8x8[i++];
  1041. block[idx] = value * scale;
  1042. if (!v->pquantizer)
  1043. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1044. }
  1045. if (!skip_block) {
  1046. if (i == 1)
  1047. v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
  1048. else {
  1049. v->vc1dsp.vc1_inv_trans_8x8(block);
  1050. s->idsp.add_pixels_clamped(block, dst, linesize);
  1051. }
  1052. }
  1053. break;
  1054. case TT_4X4:
  1055. pat = ~subblkpat & 0xF;
  1056. for (j = 0; j < 4; j++) {
  1057. last = subblkpat & (1 << (3 - j));
  1058. i = 0;
  1059. off = (j & 1) * 4 + (j & 2) * 16;
  1060. while (!last) {
  1061. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1062. i += skip;
  1063. if (i > 15)
  1064. break;
  1065. if (!v->fcm)
  1066. idx = ff_vc1_simple_progressive_4x4_zz[i++];
  1067. else
  1068. idx = ff_vc1_adv_interlaced_4x4_zz[i++];
  1069. block[idx + off] = value * scale;
  1070. if (!v->pquantizer)
  1071. block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
  1072. }
  1073. if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
  1074. if (i == 1)
  1075. v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1076. else
  1077. v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1078. }
  1079. }
  1080. break;
  1081. case TT_8X4:
  1082. pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
  1083. for (j = 0; j < 2; j++) {
  1084. last = subblkpat & (1 << (1 - j));
  1085. i = 0;
  1086. off = j * 32;
  1087. while (!last) {
  1088. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1089. i += skip;
  1090. if (i > 31)
  1091. break;
  1092. if (!v->fcm)
  1093. idx = v->zz_8x4[i++] + off;
  1094. else
  1095. idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
  1096. block[idx] = value * scale;
  1097. if (!v->pquantizer)
  1098. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1099. }
  1100. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1101. if (i == 1)
  1102. v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
  1103. else
  1104. v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
  1105. }
  1106. }
  1107. break;
  1108. case TT_4X8:
  1109. pat = ~(subblkpat * 5) & 0xF;
  1110. for (j = 0; j < 2; j++) {
  1111. last = subblkpat & (1 << (1 - j));
  1112. i = 0;
  1113. off = j * 4;
  1114. while (!last) {
  1115. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1116. i += skip;
  1117. if (i > 31)
  1118. break;
  1119. if (!v->fcm)
  1120. idx = v->zz_4x8[i++] + off;
  1121. else
  1122. idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
  1123. block[idx] = value * scale;
  1124. if (!v->pquantizer)
  1125. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1126. }
  1127. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1128. if (i == 1)
  1129. v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
  1130. else
  1131. v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
  1132. }
  1133. }
  1134. break;
  1135. }
  1136. if (ttmb_out)
  1137. *ttmb_out |= ttblk << (n * 4);
  1138. return pat;
  1139. }
  1140. /** @} */ // Macroblock group
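/* Number of extra magnitude bits read by GET_MVDATA for each offset class
 * (index1 = index % 6 or index / 6), paired with offset_table above. */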
  1141. static const uint8_t size_table[6] = { 0, 2, 3, 4, 5, 8 };
  1142. /** Decode one P-frame MB
  1143. */
  1144. static int vc1_decode_p_mb(VC1Context *v)
  1145. {
  1146. MpegEncContext *s = &v->s;
  1147. GetBitContext *gb = &s->gb;
  1148. int i, j;
  1149. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1150. int cbp; /* cbp decoding stuff */
  1151. int mqdiff, mquant; /* MB quantization */
  1152. int ttmb = v->ttfrm; /* MB Transform type */
  1153. int mb_has_coeffs = 1; /* last_flag */
  1154. int dmv_x, dmv_y; /* Differential MV components */
  1155. int index, index1; /* LUT indexes */
  1156. int val, sign; /* temp values */
  1157. int first_block = 1;
  1158. int dst_idx, off;
  1159. int skipped, fourmv;
  1160. int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
  1161. mquant = v->pq; /* lossy initialization */
  1162. if (v->mv_type_is_raw)
  1163. fourmv = get_bits1(gb);
  1164. else
  1165. fourmv = v->mv_type_mb_plane[mb_pos];
  1166. if (v->skip_is_raw)
  1167. skipped = get_bits1(gb);
  1168. else
  1169. skipped = v->s.mbskip_table[mb_pos];
  1170. if (!fourmv) { /* 1MV mode */
  1171. if (!skipped) {
  1172. GET_MVDATA(dmv_x, dmv_y);
  1173. if (s->mb_intra) {
  1174. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1175. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1176. }
  1177. s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
  1178. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1179. /* FIXME Set DC val for inter block ? */
  1180. if (s->mb_intra && !mb_has_coeffs) {
  1181. GET_MQUANT();
  1182. s->ac_pred = get_bits1(gb);
  1183. cbp = 0;
  1184. } else if (mb_has_coeffs) {
  1185. if (s->mb_intra)
  1186. s->ac_pred = get_bits1(gb);
  1187. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1188. GET_MQUANT();
  1189. } else {
  1190. mquant = v->pq;
  1191. cbp = 0;
  1192. }
  1193. s->current_picture.qscale_table[mb_pos] = mquant;
  1194. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1195. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
  1196. VC1_TTMB_VLC_BITS, 2);
  1197. if (!s->mb_intra) ff_vc1_mc_1mv(v, 0);
  1198. dst_idx = 0;
  1199. for (i = 0; i < 6; i++) {
  1200. s->dc_val[0][s->block_index[i]] = 0;
  1201. dst_idx += i >> 2;
  1202. val = ((cbp >> (5 - i)) & 1);
  1203. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1204. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1205. if (s->mb_intra) {
  1206. /* check if prediction blocks A and C are available */
  1207. v->a_avail = v->c_avail = 0;
  1208. if (i == 2 || i == 3 || !s->first_slice_line)
  1209. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1210. if (i == 1 || i == 3 || s->mb_x)
  1211. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1212. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1213. (i & 4) ? v->codingset2 : v->codingset);
  1214. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1215. continue;
  1216. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1217. if (v->rangeredfrm)
  1218. for (j = 0; j < 64; j++)
  1219. s->block[i][j] <<= 1;
  1220. s->idsp.put_signed_pixels_clamped(s->block[i],
  1221. s->dest[dst_idx] + off,
  1222. i & 4 ? s->uvlinesize
  1223. : s->linesize);
  1224. if (v->pq >= 9 && v->overlap) {
  1225. if (v->c_avail)
  1226. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1227. if (v->a_avail)
  1228. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1229. }
  1230. block_cbp |= 0xF << (i << 2);
  1231. block_intra |= 1 << i;
  1232. } else if (val) {
  1233. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
  1234. s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
  1235. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1236. block_cbp |= pat << (i << 2);
  1237. if (!v->ttmbf && ttmb < 8)
  1238. ttmb = -1;
  1239. first_block = 0;
  1240. }
  1241. }
  1242. } else { // skipped
  1243. s->mb_intra = 0;
  1244. for (i = 0; i < 6; i++) {
  1245. v->mb_type[0][s->block_index[i]] = 0;
  1246. s->dc_val[0][s->block_index[i]] = 0;
  1247. }
  1248. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1249. s->current_picture.qscale_table[mb_pos] = 0;
  1250. ff_vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1251. ff_vc1_mc_1mv(v, 0);
  1252. }
  1253. } else { // 4MV mode
  1254. if (!skipped /* unskipped MB */) {
  1255. int intra_count = 0, coded_inter = 0;
  1256. int is_intra[6], is_coded[6];
  1257. /* Get CBPCY */
  1258. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1259. for (i = 0; i < 6; i++) {
  1260. val = ((cbp >> (5 - i)) & 1);
  1261. s->dc_val[0][s->block_index[i]] = 0;
  1262. s->mb_intra = 0;
  1263. if (i < 4) {
  1264. dmv_x = dmv_y = 0;
  1265. s->mb_intra = 0;
  1266. mb_has_coeffs = 0;
  1267. if (val) {
  1268. GET_MVDATA(dmv_x, dmv_y);
  1269. }
  1270. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1271. if (!s->mb_intra)
  1272. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1273. intra_count += s->mb_intra;
  1274. is_intra[i] = s->mb_intra;
  1275. is_coded[i] = mb_has_coeffs;
  1276. }
  1277. if (i & 4) {
  1278. is_intra[i] = (intra_count >= 3);
  1279. is_coded[i] = val;
  1280. }
  1281. if (i == 4)
  1282. ff_vc1_mc_4mv_chroma(v, 0);
  1283. v->mb_type[0][s->block_index[i]] = is_intra[i];
  1284. if (!coded_inter)
  1285. coded_inter = !is_intra[i] & is_coded[i];
  1286. }
  1287. // if there are no coded blocks then don't do anything more
  1288. dst_idx = 0;
  1289. if (!intra_count && !coded_inter)
  1290. goto end;
  1291. GET_MQUANT();
  1292. s->current_picture.qscale_table[mb_pos] = mquant;
  1293. /* test if block is intra and has pred */
  1294. {
  1295. int intrapred = 0;
  1296. for (i = 0; i < 6; i++)
  1297. if (is_intra[i]) {
  1298. if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
  1299. || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
  1300. intrapred = 1;
  1301. break;
  1302. }
  1303. }
  1304. if (intrapred)
  1305. s->ac_pred = get_bits1(gb);
  1306. else
  1307. s->ac_pred = 0;
  1308. }
  1309. if (!v->ttmbf && coded_inter)
  1310. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1311. for (i = 0; i < 6; i++) {
  1312. dst_idx += i >> 2;
  1313. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1314. s->mb_intra = is_intra[i];
  1315. if (is_intra[i]) {
  1316. /* check if prediction blocks A and C are available */
  1317. v->a_avail = v->c_avail = 0;
  1318. if (i == 2 || i == 3 || !s->first_slice_line)
  1319. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1320. if (i == 1 || i == 3 || s->mb_x)
  1321. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1322. vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
  1323. (i & 4) ? v->codingset2 : v->codingset);
  1324. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1325. continue;
  1326. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1327. if (v->rangeredfrm)
  1328. for (j = 0; j < 64; j++)
  1329. s->block[i][j] <<= 1;
  1330. s->idsp.put_signed_pixels_clamped(s->block[i],
  1331. s->dest[dst_idx] + off,
  1332. (i & 4) ? s->uvlinesize
  1333. : s->linesize);
  1334. if (v->pq >= 9 && v->overlap) {
  1335. if (v->c_avail)
  1336. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1337. if (v->a_avail)
  1338. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1339. }
  1340. block_cbp |= 0xF << (i << 2);
  1341. block_intra |= 1 << i;
  1342. } else if (is_coded[i]) {
  1343. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1344. first_block, s->dest[dst_idx] + off,
  1345. (i & 4) ? s->uvlinesize : s->linesize,
  1346. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1347. &block_tt);
  1348. block_cbp |= pat << (i << 2);
  1349. if (!v->ttmbf && ttmb < 8)
  1350. ttmb = -1;
  1351. first_block = 0;
  1352. }
  1353. }
  1354. } else { // skipped MB
  1355. s->mb_intra = 0;
  1356. s->current_picture.qscale_table[mb_pos] = 0;
  1357. for (i = 0; i < 6; i++) {
  1358. v->mb_type[0][s->block_index[i]] = 0;
  1359. s->dc_val[0][s->block_index[i]] = 0;
  1360. }
  1361. for (i = 0; i < 4; i++) {
  1362. ff_vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1363. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1364. }
  1365. ff_vc1_mc_4mv_chroma(v, 0);
  1366. s->current_picture.qscale_table[mb_pos] = 0;
  1367. }
  1368. }
  1369. end:
  1370. v->cbp[s->mb_x] = block_cbp;
  1371. v->ttblk[s->mb_x] = block_tt;
  1372. v->is_intra[s->mb_x] = block_intra;
  1373. return 0;
  1374. }
  1375. /* Decode one macroblock in an interlaced frame P-picture */
  1376. static int vc1_decode_p_mb_intfr(VC1Context *v)
  1377. {
  1378. MpegEncContext *s = &v->s;
  1379. GetBitContext *gb = &s->gb;
  1380. int i;
  1381. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1382. int cbp = 0; /* cbp decoding stuff */
  1383. int mqdiff, mquant; /* MB quantization */
  1384. int ttmb = v->ttfrm; /* MB Transform type */
  1385. int mb_has_coeffs = 1; /* last_flag */
  1386. int dmv_x, dmv_y; /* Differential MV components */
  1387. int val; /* temp value */
  1388. int first_block = 1;
  1389. int dst_idx, off;
  1390. int skipped, fourmv = 0, twomv = 0;
  1391. int block_cbp = 0, pat, block_tt = 0;
  1392. int idx_mbmode = 0, mvbp;
  1393. int stride_y, fieldtx;
  1394. mquant = v->pq; /* Lossy initialization */
  1395. if (v->skip_is_raw)
  1396. skipped = get_bits1(gb);
  1397. else
  1398. skipped = v->s.mbskip_table[mb_pos];
  1399. if (!skipped) {
  1400. if (v->fourmvswitch)
  1401. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
  1402. else
  1403. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
  1404. switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
  1405. /* store the motion vector type in a flag (useful later) */
  1406. case MV_PMODE_INTFR_4MV:
  1407. fourmv = 1;
  1408. v->blk_mv_type[s->block_index[0]] = 0;
  1409. v->blk_mv_type[s->block_index[1]] = 0;
  1410. v->blk_mv_type[s->block_index[2]] = 0;
  1411. v->blk_mv_type[s->block_index[3]] = 0;
  1412. break;
  1413. case MV_PMODE_INTFR_4MV_FIELD:
  1414. fourmv = 1;
  1415. v->blk_mv_type[s->block_index[0]] = 1;
  1416. v->blk_mv_type[s->block_index[1]] = 1;
  1417. v->blk_mv_type[s->block_index[2]] = 1;
  1418. v->blk_mv_type[s->block_index[3]] = 1;
  1419. break;
  1420. case MV_PMODE_INTFR_2MV_FIELD:
  1421. twomv = 1;
  1422. v->blk_mv_type[s->block_index[0]] = 1;
  1423. v->blk_mv_type[s->block_index[1]] = 1;
  1424. v->blk_mv_type[s->block_index[2]] = 1;
  1425. v->blk_mv_type[s->block_index[3]] = 1;
  1426. break;
  1427. case MV_PMODE_INTFR_1MV:
  1428. v->blk_mv_type[s->block_index[0]] = 0;
  1429. v->blk_mv_type[s->block_index[1]] = 0;
  1430. v->blk_mv_type[s->block_index[2]] = 0;
  1431. v->blk_mv_type[s->block_index[3]] = 0;
  1432. break;
  1433. }
  1434. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  1435. for (i = 0; i < 4; i++) {
  1436. s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  1437. s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  1438. }
  1439. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  1440. s->mb_intra = 1;
  1441. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  1442. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  1443. mb_has_coeffs = get_bits1(gb);
  1444. if (mb_has_coeffs)
  1445. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1446. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1447. GET_MQUANT();
  1448. s->current_picture.qscale_table[mb_pos] = mquant;
  1449. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1450. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1451. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1452. dst_idx = 0;
  1453. for (i = 0; i < 6; i++) {
  1454. v->a_avail = v->c_avail = 0;
  1455. v->mb_type[0][s->block_index[i]] = 1;
  1456. s->dc_val[0][s->block_index[i]] = 0;
  1457. dst_idx += i >> 2;
  1458. val = ((cbp >> (5 - i)) & 1);
  1459. if (i == 2 || i == 3 || !s->first_slice_line)
  1460. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1461. if (i == 1 || i == 3 || s->mb_x)
  1462. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1463. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1464. (i & 4) ? v->codingset2 : v->codingset);
  1465. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1466. continue;
  1467. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1468. if (i < 4) {
  1469. stride_y = s->linesize << fieldtx;
  1470. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  1471. } else {
  1472. stride_y = s->uvlinesize;
  1473. off = 0;
  1474. }
  1475. s->idsp.put_signed_pixels_clamped(s->block[i],
  1476. s->dest[dst_idx] + off,
  1477. stride_y);
  1478. //TODO: loop filter
  1479. }
  1480. } else { // inter MB
  1481. mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
  1482. if (mb_has_coeffs)
  1483. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1484. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  1485. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  1486. } else {
  1487. if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
  1488. || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
  1489. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1490. }
  1491. }
  1492. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1493. for (i = 0; i < 6; i++)
  1494. v->mb_type[0][s->block_index[i]] = 0;
  1495. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
  1496. /* for all motion vector read MVDATA and motion compensate each block */
  1497. dst_idx = 0;
  1498. if (fourmv) {
  1499. mvbp = v->fourmvbp;
  1500. for (i = 0; i < 4; i++) {
  1501. dmv_x = dmv_y = 0;
  1502. if (mvbp & (8 >> i))
  1503. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1504. ff_vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
  1505. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1506. }
  1507. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1508. } else if (twomv) {
  1509. mvbp = v->twomvbp;
  1510. dmv_x = dmv_y = 0;
  1511. if (mvbp & 2) {
  1512. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1513. }
  1514. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1515. ff_vc1_mc_4mv_luma(v, 0, 0, 0);
  1516. ff_vc1_mc_4mv_luma(v, 1, 0, 0);
  1517. dmv_x = dmv_y = 0;
  1518. if (mvbp & 1) {
  1519. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1520. }
  1521. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1522. ff_vc1_mc_4mv_luma(v, 2, 0, 0);
  1523. ff_vc1_mc_4mv_luma(v, 3, 0, 0);
  1524. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1525. } else {
  1526. mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
  1527. dmv_x = dmv_y = 0;
  1528. if (mvbp) {
  1529. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1530. }
  1531. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1532. ff_vc1_mc_1mv(v, 0);
  1533. }
  1534. if (cbp)
  1535. GET_MQUANT(); // p. 227
  1536. s->current_picture.qscale_table[mb_pos] = mquant;
  1537. if (!v->ttmbf && cbp)
  1538. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1539. for (i = 0; i < 6; i++) {
  1540. s->dc_val[0][s->block_index[i]] = 0;
  1541. dst_idx += i >> 2;
  1542. val = ((cbp >> (5 - i)) & 1);
  1543. if (!fieldtx)
  1544. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1545. else
  1546. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  1547. if (val) {
  1548. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1549. first_block, s->dest[dst_idx] + off,
  1550. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  1551. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1552. block_cbp |= pat << (i << 2);
  1553. if (!v->ttmbf && ttmb < 8)
  1554. ttmb = -1;
  1555. first_block = 0;
  1556. }
  1557. }
  1558. }
  1559. } else { // skipped
  1560. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1561. for (i = 0; i < 6; i++) {
  1562. v->mb_type[0][s->block_index[i]] = 0;
  1563. s->dc_val[0][s->block_index[i]] = 0;
  1564. }
  1565. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1566. s->current_picture.qscale_table[mb_pos] = 0;
  1567. v->blk_mv_type[s->block_index[0]] = 0;
  1568. v->blk_mv_type[s->block_index[1]] = 0;
  1569. v->blk_mv_type[s->block_index[2]] = 0;
  1570. v->blk_mv_type[s->block_index[3]] = 0;
  1571. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1572. ff_vc1_mc_1mv(v, 0);
  1573. }
  1574. if (s->mb_x == s->mb_width - 1)
  1575. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
  1576. return 0;
  1577. }
  1578. static int vc1_decode_p_mb_intfi(VC1Context *v)
  1579. {
  1580. MpegEncContext *s = &v->s;
  1581. GetBitContext *gb = &s->gb;
  1582. int i;
  1583. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1584. int cbp = 0; /* cbp decoding stuff */
  1585. int mqdiff, mquant; /* MB quantization */
  1586. int ttmb = v->ttfrm; /* MB Transform type */
  1587. int mb_has_coeffs = 1; /* last_flag */
  1588. int dmv_x, dmv_y; /* Differential MV components */
  1589. int val; /* temp values */
  1590. int first_block = 1;
  1591. int dst_idx, off;
  1592. int pred_flag = 0;
  1593. int block_cbp = 0, pat, block_tt = 0;
  1594. int idx_mbmode = 0;
  1595. mquant = v->pq; /* Lossy initialization */
  1596. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  1597. if (idx_mbmode <= 1) { // intra MB
  1598. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  1599. s->mb_intra = 1;
  1600. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  1601. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  1602. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1603. GET_MQUANT();
  1604. s->current_picture.qscale_table[mb_pos] = mquant;
  1605. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1606. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1607. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1608. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1609. mb_has_coeffs = idx_mbmode & 1;
  1610. if (mb_has_coeffs)
  1611. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1612. dst_idx = 0;
  1613. for (i = 0; i < 6; i++) {
  1614. v->a_avail = v->c_avail = 0;
  1615. v->mb_type[0][s->block_index[i]] = 1;
  1616. s->dc_val[0][s->block_index[i]] = 0;
  1617. dst_idx += i >> 2;
  1618. val = ((cbp >> (5 - i)) & 1);
  1619. if (i == 2 || i == 3 || !s->first_slice_line)
  1620. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1621. if (i == 1 || i == 3 || s->mb_x)
  1622. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1623. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1624. (i & 4) ? v->codingset2 : v->codingset);
  1625. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1626. continue;
  1627. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1628. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1629. s->idsp.put_signed_pixels_clamped(s->block[i],
  1630. s->dest[dst_idx] + off,
  1631. (i & 4) ? s->uvlinesize
  1632. : s->linesize);
  1633. // TODO: loop filter
  1634. }
  1635. } else {
  1636. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1637. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1638. for (i = 0; i < 6; i++)
  1639. v->mb_type[0][s->block_index[i]] = 0;
  1640. if (idx_mbmode <= 5) { // 1-MV
  1641. dmv_x = dmv_y = pred_flag = 0;
  1642. if (idx_mbmode & 1) {
  1643. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1644. }
  1645. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1646. ff_vc1_mc_1mv(v, 0);
  1647. mb_has_coeffs = !(idx_mbmode & 2);
  1648. } else { // 4-MV
  1649. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1650. for (i = 0; i < 4; i++) {
  1651. dmv_x = dmv_y = pred_flag = 0;
  1652. if (v->fourmvbp & (8 >> i))
  1653. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1654. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1655. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1656. }
  1657. ff_vc1_mc_4mv_chroma(v, 0);
  1658. mb_has_coeffs = idx_mbmode & 1;
  1659. }
  1660. if (mb_has_coeffs)
  1661. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1662. if (cbp) {
  1663. GET_MQUANT();
  1664. }
  1665. s->current_picture.qscale_table[mb_pos] = mquant;
  1666. if (!v->ttmbf && cbp) {
  1667. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1668. }
  1669. dst_idx = 0;
  1670. for (i = 0; i < 6; i++) {
  1671. s->dc_val[0][s->block_index[i]] = 0;
  1672. dst_idx += i >> 2;
  1673. val = ((cbp >> (5 - i)) & 1);
  1674. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  1675. if (val) {
  1676. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1677. first_block, s->dest[dst_idx] + off,
  1678. (i & 4) ? s->uvlinesize : s->linesize,
  1679. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1680. &block_tt);
  1681. block_cbp |= pat << (i << 2);
  1682. if (!v->ttmbf && ttmb < 8)
  1683. ttmb = -1;
  1684. first_block = 0;
  1685. }
  1686. }
  1687. }
  1688. if (s->mb_x == s->mb_width - 1)
  1689. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  1690. return 0;
  1691. }
  1692. /** Decode one B-frame MB (in Main profile)
  1693. */
  1694. static void vc1_decode_b_mb(VC1Context *v)
  1695. {
  1696. MpegEncContext *s = &v->s;
  1697. GetBitContext *gb = &s->gb;
  1698. int i, j;
  1699. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1700. int cbp = 0; /* cbp decoding stuff */
  1701. int mqdiff, mquant; /* MB quantization */
  1702. int ttmb = v->ttfrm; /* MB Transform type */
  1703. int mb_has_coeffs = 0; /* last_flag */
  1704. int index, index1; /* LUT indexes */
  1705. int val, sign; /* temp values */
  1706. int first_block = 1;
  1707. int dst_idx, off;
  1708. int skipped, direct;
  1709. int dmv_x[2], dmv_y[2];
  1710. int bmvtype = BMV_TYPE_BACKWARD;
  1711. mquant = v->pq; /* lossy initialization */
  1712. s->mb_intra = 0;
  1713. if (v->dmb_is_raw)
  1714. direct = get_bits1(gb);
  1715. else
  1716. direct = v->direct_mb_plane[mb_pos];
  1717. if (v->skip_is_raw)
  1718. skipped = get_bits1(gb);
  1719. else
  1720. skipped = v->s.mbskip_table[mb_pos];
  1721. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1722. for (i = 0; i < 6; i++) {
  1723. v->mb_type[0][s->block_index[i]] = 0;
  1724. s->dc_val[0][s->block_index[i]] = 0;
  1725. }
  1726. s->current_picture.qscale_table[mb_pos] = 0;
  1727. if (!direct) {
  1728. if (!skipped) {
  1729. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1730. dmv_x[1] = dmv_x[0];
  1731. dmv_y[1] = dmv_y[0];
  1732. }
  1733. if (skipped || !s->mb_intra) {
  1734. bmvtype = decode012(gb);
  1735. switch (bmvtype) {
  1736. case 0:
  1737. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  1738. break;
  1739. case 1:
  1740. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  1741. break;
  1742. case 2:
  1743. bmvtype = BMV_TYPE_INTERPOLATED;
  1744. dmv_x[0] = dmv_y[0] = 0;
  1745. }
  1746. }
  1747. }
  1748. for (i = 0; i < 6; i++)
  1749. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1750. if (skipped) {
  1751. if (direct)
  1752. bmvtype = BMV_TYPE_INTERPOLATED;
  1753. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1754. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1755. return;
  1756. }
  1757. if (direct) {
  1758. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1759. GET_MQUANT();
  1760. s->mb_intra = 0;
  1761. s->current_picture.qscale_table[mb_pos] = mquant;
  1762. if (!v->ttmbf)
  1763. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1764. dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
  1765. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1766. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1767. } else {
  1768. if (!mb_has_coeffs && !s->mb_intra) {
  1769. /* no coded blocks - effectively skipped */
  1770. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1771. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1772. return;
  1773. }
  1774. if (s->mb_intra && !mb_has_coeffs) {
  1775. GET_MQUANT();
  1776. s->current_picture.qscale_table[mb_pos] = mquant;
  1777. s->ac_pred = get_bits1(gb);
  1778. cbp = 0;
  1779. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1780. } else {
  1781. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  1782. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1783. if (!mb_has_coeffs) {
  1784. /* interpolated skipped block */
  1785. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1786. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1787. return;
  1788. }
  1789. }
  1790. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1791. if (!s->mb_intra) {
  1792. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1793. }
  1794. if (s->mb_intra)
  1795. s->ac_pred = get_bits1(gb);
  1796. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1797. GET_MQUANT();
  1798. s->current_picture.qscale_table[mb_pos] = mquant;
  1799. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1800. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1801. }
  1802. }
  1803. dst_idx = 0;
  1804. for (i = 0; i < 6; i++) {
  1805. s->dc_val[0][s->block_index[i]] = 0;
  1806. dst_idx += i >> 2;
  1807. val = ((cbp >> (5 - i)) & 1);
  1808. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1809. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1810. if (s->mb_intra) {
  1811. /* check if prediction blocks A and C are available */
  1812. v->a_avail = v->c_avail = 0;
  1813. if (i == 2 || i == 3 || !s->first_slice_line)
  1814. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1815. if (i == 1 || i == 3 || s->mb_x)
  1816. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1817. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1818. (i & 4) ? v->codingset2 : v->codingset);
  1819. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1820. continue;
  1821. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1822. if (v->rangeredfrm)
  1823. for (j = 0; j < 64; j++)
  1824. s->block[i][j] <<= 1;
  1825. s->idsp.put_signed_pixels_clamped(s->block[i],
  1826. s->dest[dst_idx] + off,
  1827. i & 4 ? s->uvlinesize
  1828. : s->linesize);
  1829. } else if (val) {
  1830. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1831. first_block, s->dest[dst_idx] + off,
  1832. (i & 4) ? s->uvlinesize : s->linesize,
  1833. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
  1834. if (!v->ttmbf && ttmb < 8)
  1835. ttmb = -1;
  1836. first_block = 0;
  1837. }
  1838. }
  1839. }
  1840. /** Decode one B-frame MB (in interlaced field B picture)
  1841. */
  1842. static void vc1_decode_b_mb_intfi(VC1Context *v)
  1843. {
  1844. MpegEncContext *s = &v->s;
  1845. GetBitContext *gb = &s->gb;
  1846. int i, j;
  1847. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1848. int cbp = 0; /* cbp decoding stuff */
  1849. int mqdiff, mquant; /* MB quantization */
  1850. int ttmb = v->ttfrm; /* MB Transform type */
  1851. int mb_has_coeffs = 0; /* last_flag */
  1852. int val; /* temp value */
  1853. int first_block = 1;
  1854. int dst_idx, off;
  1855. int fwd;
  1856. int dmv_x[2], dmv_y[2], pred_flag[2];
  1857. int bmvtype = BMV_TYPE_BACKWARD;
  1858. int idx_mbmode;
  1859. mquant = v->pq; /* Lossy initialization */
  1860. s->mb_intra = 0;
  1861. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  1862. if (idx_mbmode <= 1) { // intra MB
  1863. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  1864. s->mb_intra = 1;
  1865. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1866. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1867. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1868. GET_MQUANT();
  1869. s->current_picture.qscale_table[mb_pos] = mquant;
  1870. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1871. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1872. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1873. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1874. mb_has_coeffs = idx_mbmode & 1;
  1875. if (mb_has_coeffs)
  1876. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1877. dst_idx = 0;
  1878. for (i = 0; i < 6; i++) {
  1879. v->a_avail = v->c_avail = 0;
  1880. v->mb_type[0][s->block_index[i]] = 1;
  1881. s->dc_val[0][s->block_index[i]] = 0;
  1882. dst_idx += i >> 2;
  1883. val = ((cbp >> (5 - i)) & 1);
  1884. if (i == 2 || i == 3 || !s->first_slice_line)
  1885. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1886. if (i == 1 || i == 3 || s->mb_x)
  1887. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1888. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1889. (i & 4) ? v->codingset2 : v->codingset);
  1890. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1891. continue;
  1892. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1893. if (v->rangeredfrm)
  1894. for (j = 0; j < 64; j++)
  1895. s->block[i][j] <<= 1;
  1896. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1897. s->idsp.put_signed_pixels_clamped(s->block[i],
  1898. s->dest[dst_idx] + off,
  1899. (i & 4) ? s->uvlinesize
  1900. : s->linesize);
  1901. // TODO: loop filter
  1902. }
  1903. } else {
  1904. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1905. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1906. for (i = 0; i < 6; i++)
  1907. v->mb_type[0][s->block_index[i]] = 0;
  1908. if (v->fmb_is_raw)
  1909. fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
  1910. else
  1911. fwd = v->forward_mb_plane[mb_pos];
  1912. if (idx_mbmode <= 5) { // 1-MV
  1913. int interpmvp = 0;
  1914. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1915. pred_flag[0] = pred_flag[1] = 0;
  1916. if (fwd)
  1917. bmvtype = BMV_TYPE_FORWARD;
  1918. else {
  1919. bmvtype = decode012(gb);
  1920. switch (bmvtype) {
  1921. case 0:
  1922. bmvtype = BMV_TYPE_BACKWARD;
  1923. break;
  1924. case 1:
  1925. bmvtype = BMV_TYPE_DIRECT;
  1926. break;
  1927. case 2:
  1928. bmvtype = BMV_TYPE_INTERPOLATED;
  1929. interpmvp = get_bits1(gb);
  1930. }
  1931. }
  1932. v->bmvtype = bmvtype;
  1933. if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
  1934. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1935. }
  1936. if (interpmvp) {
  1937. get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
  1938. }
  1939. if (bmvtype == BMV_TYPE_DIRECT) {
  1940. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  1941. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  1942. if (!s->next_picture_ptr->field_picture) {
  1943. av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
  1944. return;
  1945. }
  1946. }
  1947. ff_vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
  1948. vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
  1949. mb_has_coeffs = !(idx_mbmode & 2);
  1950. } else { // 4-MV
  1951. if (fwd)
  1952. bmvtype = BMV_TYPE_FORWARD;
  1953. v->bmvtype = bmvtype;
  1954. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1955. for (i = 0; i < 4; i++) {
  1956. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  1957. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  1958. if (v->fourmvbp & (8 >> i)) {
  1959. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
  1960. &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
  1961. &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1962. }
  1963. ff_vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
  1964. ff_vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
  1965. }
  1966. ff_vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
  1967. mb_has_coeffs = idx_mbmode & 1;
  1968. }
  1969. if (mb_has_coeffs)
  1970. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1971. if (cbp) {
  1972. GET_MQUANT();
  1973. }
  1974. s->current_picture.qscale_table[mb_pos] = mquant;
  1975. if (!v->ttmbf && cbp) {
  1976. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1977. }
  1978. dst_idx = 0;
  1979. for (i = 0; i < 6; i++) {
  1980. s->dc_val[0][s->block_index[i]] = 0;
  1981. dst_idx += i >> 2;
  1982. val = ((cbp >> (5 - i)) & 1);
  1983. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  1984. if (val) {
  1985. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1986. first_block, s->dest[dst_idx] + off,
  1987. (i & 4) ? s->uvlinesize : s->linesize,
  1988. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
  1989. if (!v->ttmbf && ttmb < 8)
  1990. ttmb = -1;
  1991. first_block = 0;
  1992. }
  1993. }
  1994. }
  1995. }
  1996. /** Decode one B-frame MB (in interlaced frame B picture)
  1997. */
  1998. static int vc1_decode_b_mb_intfr(VC1Context *v)
  1999. {
  2000. MpegEncContext *s = &v->s;
  2001. GetBitContext *gb = &s->gb;
  2002. int i, j;
  2003. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2004. int cbp = 0; /* cbp decoding stuff */
  2005. int mqdiff, mquant; /* MB quantization */
  2006. int ttmb = v->ttfrm; /* MB Transform type */
  2007. int mvsw = 0; /* motion vector switch */
  2008. int mb_has_coeffs = 1; /* last_flag */
  2009. int dmv_x, dmv_y; /* Differential MV components */
  2010. int val; /* temp value */
  2011. int first_block = 1;
  2012. int dst_idx, off;
  2013. int skipped, direct, twomv = 0;
  2014. int block_cbp = 0, pat, block_tt = 0;
  2015. int idx_mbmode = 0, mvbp;
  2016. int stride_y, fieldtx;
  2017. int bmvtype = BMV_TYPE_BACKWARD;
  2018. int dir, dir2;
  2019. mquant = v->pq; /* Lossy initialization */
  2020. s->mb_intra = 0;
  2021. if (v->skip_is_raw)
  2022. skipped = get_bits1(gb);
  2023. else
  2024. skipped = v->s.mbskip_table[mb_pos];
  2025. if (!skipped) {
  2026. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
  2027. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  2028. twomv = 1;
  2029. v->blk_mv_type[s->block_index[0]] = 1;
  2030. v->blk_mv_type[s->block_index[1]] = 1;
  2031. v->blk_mv_type[s->block_index[2]] = 1;
  2032. v->blk_mv_type[s->block_index[3]] = 1;
  2033. } else {
  2034. v->blk_mv_type[s->block_index[0]] = 0;
  2035. v->blk_mv_type[s->block_index[1]] = 0;
  2036. v->blk_mv_type[s->block_index[2]] = 0;
  2037. v->blk_mv_type[s->block_index[3]] = 0;
  2038. }
  2039. }
  2040. if (v->dmb_is_raw)
  2041. direct = get_bits1(gb);
  2042. else
  2043. direct = v->direct_mb_plane[mb_pos];
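/* direct mode: the forward and backward MVs are derived by scaling the
 * co-located MV of the next anchor picture with BFRACTION (scale_mv) */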
  2044. if (direct) {
  2045. if (s->next_picture_ptr->field_picture)
  2046. av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
  2047. s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
  2048. s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
  2049. s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
  2050. s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
  2051. if (twomv) {
  2052. s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
  2053. s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
  2054. s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
  2055. s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
  2056. for (i = 1; i < 4; i += 2) {
  2057. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
  2058. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
  2059. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
  2060. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
  2061. }
  2062. } else {
  2063. for (i = 1; i < 4; i++) {
  2064. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
  2065. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
  2066. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
  2067. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
  2068. }
  2069. }
  2070. }
  2071. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  2072. for (i = 0; i < 4; i++) {
  2073. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
  2074. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
  2075. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  2076. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  2077. }
  2078. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  2079. s->mb_intra = 1;
  2080. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2081. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  2082. mb_has_coeffs = get_bits1(gb);
  2083. if (mb_has_coeffs)
  2084. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2085. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  2086. GET_MQUANT();
  2087. s->current_picture.qscale_table[mb_pos] = mquant;
  2088. /* Set DC scale - y and c use the same (not sure if necessary here) */
  2089. s->y_dc_scale = s->y_dc_scale_table[mquant];
  2090. s->c_dc_scale = s->c_dc_scale_table[mquant];
  2091. dst_idx = 0;
  2092. for (i = 0; i < 6; i++) {
  2093. v->a_avail = v->c_avail = 0;
  2094. v->mb_type[0][s->block_index[i]] = 1;
  2095. s->dc_val[0][s->block_index[i]] = 0;
  2096. dst_idx += i >> 2;
  2097. val = ((cbp >> (5 - i)) & 1);
  2098. if (i == 2 || i == 3 || !s->first_slice_line)
  2099. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  2100. if (i == 1 || i == 3 || s->mb_x)
  2101. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  2102. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  2103. (i & 4) ? v->codingset2 : v->codingset);
  2104. if (CONFIG_GRAY && i > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2105. continue;
  2106. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  2107. if (i < 4) {
  2108. stride_y = s->linesize << fieldtx;
  2109. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  2110. } else {
  2111. stride_y = s->uvlinesize;
  2112. off = 0;
  2113. }
  2114. s->idsp.put_signed_pixels_clamped(s->block[i],
  2115. s->dest[dst_idx] + off,
  2116. stride_y);
  2117. }
  2118. } else {
  2119. s->mb_intra = v->is_intra[s->mb_x] = 0;
  2120. if (!direct) {
  2121. if (skipped || !s->mb_intra) {
  2122. bmvtype = decode012(gb);
  2123. switch (bmvtype) {
  2124. case 0:
  2125. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  2126. break;
  2127. case 1:
  2128. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  2129. break;
  2130. case 2:
  2131. bmvtype = BMV_TYPE_INTERPOLATED;
  2132. }
  2133. }
  2134. if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
  2135. mvsw = get_bits1(gb);
  2136. }
  2137. if (!skipped) { // inter MB
  2138. mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
  2139. if (mb_has_coeffs)
  2140. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2141. if (!direct) {
  2142. if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
  2143. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  2144. } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
  2145. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  2146. }
  2147. }
  2148. for (i = 0; i < 6; i++)
  2149. v->mb_type[0][s->block_index[i]] = 0;
  2150. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
  2151. /* for each motion vector, read MVDATA and motion-compensate the block */
  2152. dst_idx = 0;
  2153. if (direct) {
  2154. if (twomv) {
  2155. for (i = 0; i < 4; i++) {
  2156. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  2157. ff_vc1_mc_4mv_luma(v, i, 1, 1);
  2158. }
  2159. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2160. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2161. } else {
  2162. ff_vc1_mc_1mv(v, 0);
  2163. ff_vc1_interp_mc(v);
  2164. }
  2165. } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
  2166. mvbp = v->fourmvbp;
  2167. for (i = 0; i < 4; i++) {
  2168. dir = i==1 || i==3;
  2169. dmv_x = dmv_y = 0;
  2170. val = ((mvbp >> (3 - i)) & 1);
  2171. if (val)
  2172. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2173. j = i > 1 ? 2 : 0;
  2174. ff_vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2175. ff_vc1_mc_4mv_luma(v, j, dir, dir);
  2176. ff_vc1_mc_4mv_luma(v, j+1, dir, dir);
  2177. }
  2178. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2179. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2180. } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2181. mvbp = v->twomvbp;
  2182. dmv_x = dmv_y = 0;
  2183. if (mvbp & 2)
  2184. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2185. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2186. ff_vc1_mc_1mv(v, 0);
  2187. dmv_x = dmv_y = 0;
  2188. if (mvbp & 1)
  2189. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2190. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2191. ff_vc1_interp_mc(v);
  2192. } else if (twomv) {
  2193. dir = bmvtype == BMV_TYPE_BACKWARD;
  2194. dir2 = dir;
  2195. if (mvsw)
  2196. dir2 = !dir;
  2197. mvbp = v->twomvbp;
  2198. dmv_x = dmv_y = 0;
  2199. if (mvbp & 2)
  2200. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2201. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2202. dmv_x = dmv_y = 0;
  2203. if (mvbp & 1)
  2204. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2205. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
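/* with the MV switch set, the two field MV pairs use opposite reference
 * directions; the copies below give each direction a full set of four
 * block MVs (the top-field MV for dir, the bottom-field MV for dir2) */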
  2206. if (mvsw) {
  2207. for (i = 0; i < 2; i++) {
  2208. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2209. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2210. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2211. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2212. }
  2213. } else {
  2214. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2215. ff_vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2216. }
  2217. ff_vc1_mc_4mv_luma(v, 0, dir, 0);
  2218. ff_vc1_mc_4mv_luma(v, 1, dir, 0);
  2219. ff_vc1_mc_4mv_luma(v, 2, dir2, 0);
  2220. ff_vc1_mc_4mv_luma(v, 3, dir2, 0);
  2221. ff_vc1_mc_4mv_chroma4(v, dir, dir2, 0);
  2222. } else {
  2223. dir = bmvtype == BMV_TYPE_BACKWARD;
  2224. mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
  2225. dmv_x = dmv_y = 0;
  2226. if (mvbp)
  2227. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2228. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2229. v->blk_mv_type[s->block_index[0]] = 1;
  2230. v->blk_mv_type[s->block_index[1]] = 1;
  2231. v->blk_mv_type[s->block_index[2]] = 1;
  2232. v->blk_mv_type[s->block_index[3]] = 1;
  2233. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2234. for (i = 0; i < 2; i++) {
  2235. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2236. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2237. }
  2238. ff_vc1_mc_1mv(v, dir);
  2239. }
  2240. if (cbp)
  2241. GET_MQUANT(); // p. 227
  2242. s->current_picture.qscale_table[mb_pos] = mquant;
  2243. if (!v->ttmbf && cbp)
  2244. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  2245. for (i = 0; i < 6; i++) {
  2246. s->dc_val[0][s->block_index[i]] = 0;
  2247. dst_idx += i >> 2;
  2248. val = ((cbp >> (5 - i)) & 1);
  2249. if (!fieldtx)
  2250. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  2251. else
  2252. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  2253. if (val) {
  2254. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2255. first_block, s->dest[dst_idx] + off,
  2256. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  2257. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  2258. block_cbp |= pat << (i << 2);
  2259. if (!v->ttmbf && ttmb < 8)
  2260. ttmb = -1;
  2261. first_block = 0;
  2262. }
  2263. }
  2264. } else { // skipped
  2265. dir = 0;
  2266. for (i = 0; i < 6; i++) {
  2267. v->mb_type[0][s->block_index[i]] = 0;
  2268. s->dc_val[0][s->block_index[i]] = 0;
  2269. }
  2270. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  2271. s->current_picture.qscale_table[mb_pos] = 0;
  2272. v->blk_mv_type[s->block_index[0]] = 0;
  2273. v->blk_mv_type[s->block_index[1]] = 0;
  2274. v->blk_mv_type[s->block_index[2]] = 0;
  2275. v->blk_mv_type[s->block_index[3]] = 0;
  2276. if (!direct) {
  2277. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2278. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2279. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2280. } else {
  2281. dir = bmvtype == BMV_TYPE_BACKWARD;
  2282. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2283. if (mvsw) {
  2284. /* mvsw is set in this branch, so the bottom-field MV pair
  2285.    always uses the opposite reference direction */
  2286. int dir2 = !dir;
  2287. for (i = 0; i < 2; i++) {
  2288. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2289. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2290. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2291. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2292. }
  2293. } else {
  2294. v->blk_mv_type[s->block_index[0]] = 1;
  2295. v->blk_mv_type[s->block_index[1]] = 1;
  2296. v->blk_mv_type[s->block_index[2]] = 1;
  2297. v->blk_mv_type[s->block_index[3]] = 1;
  2298. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2299. for (i = 0; i < 2; i++) {
  2300. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2301. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2302. }
  2303. }
  2304. }
  2305. }
  2306. ff_vc1_mc_1mv(v, dir);
  2307. if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
  2308. ff_vc1_interp_mc(v);
  2309. }
  2310. }
  2311. }
  2312. if (s->mb_x == s->mb_width - 1)
  2313. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  2314. v->cbp[s->mb_x] = block_cbp;
  2315. v->ttblk[s->mb_x] = block_tt;
  2316. return 0;
  2317. }
  2318. /** Decode blocks of I-frame
  2319. */
  2320. static void vc1_decode_i_blocks(VC1Context *v)
  2321. {
  2322. int k, j;
  2323. MpegEncContext *s = &v->s;
  2324. int cbp, val;
  2325. uint8_t *coded_val;
  2326. int mb_pos;
  2327. /* select coding mode used for VLC table selection */
  2328. switch (v->y_ac_table_index) {
  2329. case 0:
  2330. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2331. break;
  2332. case 1:
  2333. v->codingset = CS_HIGH_MOT_INTRA;
  2334. break;
  2335. case 2:
  2336. v->codingset = CS_MID_RATE_INTRA;
  2337. break;
  2338. }
  2339. switch (v->c_ac_table_index) {
  2340. case 0:
  2341. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2342. break;
  2343. case 1:
  2344. v->codingset2 = CS_HIGH_MOT_INTER;
  2345. break;
  2346. case 2:
  2347. v->codingset2 = CS_MID_RATE_INTER;
  2348. break;
  2349. }
  2350. /* Set DC scale - y and c use the same */
  2351. s->y_dc_scale = s->y_dc_scale_table[v->pq];
  2352. s->c_dc_scale = s->c_dc_scale_table[v->pq];
  2353. // do frame decode
  2354. s->mb_x = s->mb_y = 0;
  2355. s->mb_intra = 1;
  2356. s->first_slice_line = 1;
  2357. for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
  2358. s->mb_x = 0;
  2359. init_block_index(v);
  2360. for (; s->mb_x < v->end_mb_x; s->mb_x++) {
  2361. uint8_t *dst[6];
  2362. ff_update_block_index(s);
  2363. dst[0] = s->dest[0];
  2364. dst[1] = dst[0] + 8;
  2365. dst[2] = s->dest[0] + s->linesize * 8;
  2366. dst[3] = dst[2] + 8;
  2367. dst[4] = s->dest[1];
  2368. dst[5] = s->dest[2];
  2369. s->bdsp.clear_blocks(s->block[0]);
  2370. mb_pos = s->mb_x + s->mb_y * s->mb_width;
  2371. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2372. s->current_picture.qscale_table[mb_pos] = v->pq;
  2373. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  2374. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  2375. // do actual MB decoding and displaying
  2376. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2377. v->s.ac_pred = get_bits1(&v->s.gb);
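/* for the four luma blocks the CBP bit is coded predictively: the decoded
 * bit is XORed with a prediction derived from neighbouring coded-block flags */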
  2378. for (k = 0; k < 6; k++) {
  2379. val = ((cbp >> (5 - k)) & 1);
  2380. if (k < 4) {
  2381. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2382. val = val ^ pred;
  2383. *coded_val = val;
  2384. }
  2385. cbp |= val << (5 - k);
  2386. vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
  2387. if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2388. continue;
  2389. v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
  2390. if (v->pq >= 9 && v->overlap) {
  2391. if (v->rangeredfrm)
  2392. for (j = 0; j < 64; j++)
  2393. s->block[k][j] <<= 1;
  2394. s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
  2395. k & 4 ? s->uvlinesize
  2396. : s->linesize);
  2397. } else {
  2398. if (v->rangeredfrm)
  2399. for (j = 0; j < 64; j++)
  2400. s->block[k][j] = (s->block[k][j] - 64) << 1;
  2401. s->idsp.put_pixels_clamped(s->block[k], dst[k],
  2402. k & 4 ? s->uvlinesize
  2403. : s->linesize);
  2404. }
  2405. }
  2406. if (v->pq >= 9 && v->overlap) {
  2407. if (s->mb_x) {
  2408. v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
  2409. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
  2410. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  2411. v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
  2412. v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
  2413. }
  2414. }
  2415. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
  2416. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
  2417. if (!s->first_slice_line) {
  2418. v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
  2419. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
  2420. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  2421. v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
  2422. v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
  2423. }
  2424. }
  2425. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
  2426. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
  2427. }
  2428. if (v->s.loop_filter)
  2429. ff_vc1_loop_filter_iblk(v, v->pq);
  2430. if (get_bits_count(&s->gb) > v->bits) {
  2431. ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
  2432. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2433. get_bits_count(&s->gb), v->bits);
  2434. return;
  2435. }
  2436. }
  2437. if (!v->s.loop_filter)
  2438. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2439. else if (s->mb_y)
  2440. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2441. s->first_slice_line = 0;
  2442. }
  2443. if (v->s.loop_filter)
  2444. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2445. /* This is intentionally mb_height and not end_mb_y - unlike in advanced
  2446. * profile, these only differ when decoding MSS2 rectangles. */
  2447. ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
  2448. }
  2449. /** Decode blocks of I-frame for advanced profile
  2450. */
  2451. static void vc1_decode_i_blocks_adv(VC1Context *v)
  2452. {
  2453. int k;
  2454. MpegEncContext *s = &v->s;
  2455. int cbp, val;
  2456. uint8_t *coded_val;
  2457. int mb_pos;
  2458. int mquant = v->pq;
  2459. int mqdiff;
  2460. GetBitContext *gb = &s->gb;
  2461. /* select coding mode used for VLC table selection */
  2462. switch (v->y_ac_table_index) {
  2463. case 0:
  2464. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2465. break;
  2466. case 1:
  2467. v->codingset = CS_HIGH_MOT_INTRA;
  2468. break;
  2469. case 2:
  2470. v->codingset = CS_MID_RATE_INTRA;
  2471. break;
  2472. }
  2473. switch (v->c_ac_table_index) {
  2474. case 0:
  2475. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2476. break;
  2477. case 1:
  2478. v->codingset2 = CS_HIGH_MOT_INTER;
  2479. break;
  2480. case 2:
  2481. v->codingset2 = CS_MID_RATE_INTER;
  2482. break;
  2483. }
  2484. // do frame decode
  2485. s->mb_x = s->mb_y = 0;
  2486. s->mb_intra = 1;
  2487. s->first_slice_line = 1;
  2488. s->mb_y = s->start_mb_y;
  2489. if (s->start_mb_y) {
  2490. s->mb_x = 0;
  2491. init_block_index(v);
  2492. memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
  2493. (1 + s->b8_stride) * sizeof(*s->coded_block));
  2494. }
  2495. for (; s->mb_y < s->end_mb_y; s->mb_y++) {
  2496. s->mb_x = 0;
  2497. init_block_index(v);
  2498. for (;s->mb_x < s->mb_width; s->mb_x++) {
  2499. int16_t (*block)[64] = v->block[v->cur_blk_idx];
  2500. ff_update_block_index(s);
  2501. s->bdsp.clear_blocks(block[0]);
  2502. mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2503. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  2504. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  2505. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  2506. // do actual MB decoding and displaying
  2507. if (v->fieldtx_is_raw)
  2508. v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
  2509. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2510. if (v->acpred_is_raw)
  2511. v->s.ac_pred = get_bits1(&v->s.gb);
  2512. else
  2513. v->s.ac_pred = v->acpred_plane[mb_pos];
  2514. if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
  2515. v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
  2516. GET_MQUANT();
  2517. s->current_picture.qscale_table[mb_pos] = mquant;
  2518. /* Set DC scale - y and c use the same */
  2519. s->y_dc_scale = s->y_dc_scale_table[mquant];
  2520. s->c_dc_scale = s->c_dc_scale_table[mquant];
  2521. for (k = 0; k < 6; k++) {
  2522. val = ((cbp >> (5 - k)) & 1);
  2523. if (k < 4) {
  2524. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2525. val = val ^ pred;
  2526. *coded_val = val;
  2527. }
  2528. cbp |= val << (5 - k);
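/* availability of the top (A) and left (C) predictor blocks for
 * DC/AC prediction, as in the intra MB paths above */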
  2529. v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
  2530. v->c_avail = !!s->mb_x || (k == 1 || k == 3);
  2531. vc1_decode_i_block_adv(v, block[k], k, val,
  2532. (k < 4) ? v->codingset : v->codingset2, mquant);
  2533. if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2534. continue;
  2535. v->vc1dsp.vc1_inv_trans_8x8(block[k]);
  2536. }
  2537. ff_vc1_smooth_overlap_filter_iblk(v);
  2538. vc1_put_signed_blocks_clamped(v);
  2539. if (v->s.loop_filter)
  2540. ff_vc1_loop_filter_iblk_delayed(v, v->pq);
  2541. if (get_bits_count(&s->gb) > v->bits) {
  2542. // TODO: may need modification to handle slice coding
  2543. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2544. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2545. get_bits_count(&s->gb), v->bits);
  2546. return;
  2547. }
  2548. }
  2549. if (!v->s.loop_filter)
  2550. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2551. else if (s->mb_y)
  2552. ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
  2553. s->first_slice_line = 0;
  2554. }
  2555. /* raw bottom MB row */
  2556. s->mb_x = 0;
  2557. init_block_index(v);
  2558. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2559. ff_update_block_index(s);
  2560. vc1_put_signed_blocks_clamped(v);
  2561. if (v->s.loop_filter)
  2562. ff_vc1_loop_filter_iblk_delayed(v, v->pq);
  2563. }
  2564. if (v->s.loop_filter)
  2565. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2566. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2567. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2568. }
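/** Decode blocks of P-frame
 */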
  2569. static void vc1_decode_p_blocks(VC1Context *v)
  2570. {
  2571. MpegEncContext *s = &v->s;
  2572. int apply_loop_filter;
  2573. /* select coding mode used for VLC table selection */
  2574. switch (v->c_ac_table_index) {
  2575. case 0:
  2576. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2577. break;
  2578. case 1:
  2579. v->codingset = CS_HIGH_MOT_INTRA;
  2580. break;
  2581. case 2:
  2582. v->codingset = CS_MID_RATE_INTRA;
  2583. break;
  2584. }
  2585. switch (v->c_ac_table_index) {
  2586. case 0:
  2587. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2588. break;
  2589. case 1:
  2590. v->codingset2 = CS_HIGH_MOT_INTER;
  2591. break;
  2592. case 2:
  2593. v->codingset2 = CS_MID_RATE_INTER;
  2594. break;
  2595. }
  2596. apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
  2597. v->fcm == PROGRESSIVE;
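/* deblocking lags the decoding loop: it is skipped on the first row of the
 * slice and run once more after the last row, and is only enabled here for
 * progressive P pictures */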
  2598. s->first_slice_line = 1;
  2599. memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
  2600. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2601. s->mb_x = 0;
  2602. init_block_index(v);
  2603. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2604. ff_update_block_index(s);
  2605. if (v->fcm == ILACE_FIELD)
  2606. vc1_decode_p_mb_intfi(v);
  2607. else if (v->fcm == ILACE_FRAME)
  2608. vc1_decode_p_mb_intfr(v);
  2609. else vc1_decode_p_mb(v);
  2610. if (s->mb_y != s->start_mb_y && apply_loop_filter)
  2611. ff_vc1_apply_p_loop_filter(v);
  2612. if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
  2613. // TODO: may need modification to handle slice coding
  2614. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2615. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2616. get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
  2617. return;
  2618. }
  2619. }
  2620. memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
  2621. memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
  2622. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  2623. memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
  2624. if (s->mb_y != s->start_mb_y)
  2625. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2626. s->first_slice_line = 0;
  2627. }
  2628. if (apply_loop_filter) {
  2629. s->mb_x = 0;
  2630. init_block_index(v);
  2631. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2632. ff_update_block_index(s);
  2633. ff_vc1_apply_p_loop_filter(v);
  2634. }
  2635. }
  2636. if (s->end_mb_y >= s->start_mb_y)
  2637. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2638. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2639. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2640. }
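/** Decode blocks of B-frame
 */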
  2641. static void vc1_decode_b_blocks(VC1Context *v)
  2642. {
  2643. MpegEncContext *s = &v->s;
  2644. /* select coding mode used for VLC table selection */
  2645. switch (v->c_ac_table_index) {
  2646. case 0:
  2647. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2648. break;
  2649. case 1:
  2650. v->codingset = CS_HIGH_MOT_INTRA;
  2651. break;
  2652. case 2:
  2653. v->codingset = CS_MID_RATE_INTRA;
  2654. break;
  2655. }
  2656. switch (v->c_ac_table_index) {
  2657. case 0:
  2658. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2659. break;
  2660. case 1:
  2661. v->codingset2 = CS_HIGH_MOT_INTER;
  2662. break;
  2663. case 2:
  2664. v->codingset2 = CS_MID_RATE_INTER;
  2665. break;
  2666. }
  2667. s->first_slice_line = 1;
  2668. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2669. s->mb_x = 0;
  2670. init_block_index(v);
  2671. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2672. ff_update_block_index(s);
  2673. if (v->fcm == ILACE_FIELD)
  2674. vc1_decode_b_mb_intfi(v);
  2675. else if (v->fcm == ILACE_FRAME)
  2676. vc1_decode_b_mb_intfr(v);
  2677. else
  2678. vc1_decode_b_mb(v);
  2679. if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
  2680. // TODO: may need modification to handle slice coding
  2681. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2682. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2683. get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
  2684. return;
  2685. }
  2686. if (v->s.loop_filter)
  2687. ff_vc1_loop_filter_iblk(v, v->pq);
  2688. }
  2689. if (!v->s.loop_filter)
  2690. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2691. else if (s->mb_y)
  2692. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2693. s->first_slice_line = 0;
  2694. }
  2695. if (v->s.loop_filter)
  2696. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2697. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2698. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2699. }
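/** Reconstruct a skipped P-frame by copying the previous picture
 */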
  2700. static void vc1_decode_skip_blocks(VC1Context *v)
  2701. {
  2702. MpegEncContext *s = &v->s;
  2703. if (!v->s.last_picture.f->data[0])
  2704. return;
  2705. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
  2706. s->first_slice_line = 1;
  2707. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2708. s->mb_x = 0;
  2709. init_block_index(v);
  2710. ff_update_block_index(s);
  2711. memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
  2712. memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2713. memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2714. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2715. s->first_slice_line = 0;
  2716. }
  2717. s->pict_type = AV_PICTURE_TYPE_P;
  2718. }
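/** Decode all blocks of the current picture, dispatching on picture type
 */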
  2719. void ff_vc1_decode_blocks(VC1Context *v)
  2720. {
  2721. v->s.esc3_level_length = 0;
  2722. if (v->x8_type) {
  2723. ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
  2724. } else {
  2725. v->cur_blk_idx = 0;
  2726. v->left_blk_idx = -1;
  2727. v->topleft_blk_idx = 1;
  2728. v->top_blk_idx = 2;
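/* indices of the current and neighbouring MBs within the v->block[]
 * buffers used by the delayed put/overlap filtering
 * (see vc1_put_signed_blocks_clamped()) */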
  2729. switch (v->s.pict_type) {
  2730. case AV_PICTURE_TYPE_I:
  2731. if (v->profile == PROFILE_ADVANCED)
  2732. vc1_decode_i_blocks_adv(v);
  2733. else
  2734. vc1_decode_i_blocks(v);
  2735. break;
  2736. case AV_PICTURE_TYPE_P:
  2737. if (v->p_frame_skipped)
  2738. vc1_decode_skip_blocks(v);
  2739. else
  2740. vc1_decode_p_blocks(v);
  2741. break;
  2742. case AV_PICTURE_TYPE_B:
  2743. if (v->bi_type) {
  2744. if (v->profile == PROFILE_ADVANCED)
  2745. vc1_decode_i_blocks_adv(v);
  2746. else
  2747. vc1_decode_i_blocks(v);
  2748. } else
  2749. vc1_decode_b_blocks(v);
  2750. break;
  2751. }
  2752. }
  2753. }