  1. /*
  2. * VC-1 and WMV3 decoder
  3. * Copyright (c) 2011 Mashiat Sarker Shakkhar
  4. * Copyright (c) 2006-2007 Konstantin Shishkov
  5. * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * VC-1 and WMV3 block decoding routines
  26. */
  27. #include "avcodec.h"
  28. #include "mpegutils.h"
  29. #include "mpegvideo.h"
  30. #include "msmpeg4data.h"
  31. #include "unary.h"
  32. #include "vc1.h"
  33. #include "vc1_pred.h"
  34. #include "vc1acdata.h"
  35. #include "vc1data.h"
  36. #define MB_INTRA_VLC_BITS 9
  37. #define DC_VLC_BITS 9
  38. // offset tables for interlaced picture MVDATA decoding
  39. static const uint8_t offset_table[2][9] = {
  40. { 0, 1, 2, 4, 8, 16, 32, 64, 128 },
  41. { 0, 1, 3, 7, 15, 31, 63, 127, 255 },
  42. };
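/* Row [0] holds plain powers of two and row [1] holds 2^k - 1 values.
 * get_mvdata_interlaced() selects the row with the DMVRANGE extension bits
 * (extend_x/extend_y), while GET_MVDATA always uses row [1] as the base
 * offset to which the extra magnitude bits described by size_table are added. */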
  43. /***********************************************************************/
  44. /**
  45. * @name VC-1 Bitplane decoding
  46. * @see 8.7, p56
  47. * @{
  48. */
  49. static inline void init_block_index(VC1Context *v)
  50. {
  51. MpegEncContext *s = &v->s;
  52. ff_init_block_index(s);
  53. if (v->field_mode && !(v->second_field ^ v->tff)) {
  54. s->dest[0] += s->current_picture_ptr->f->linesize[0];
  55. s->dest[1] += s->current_picture_ptr->f->linesize[1];
  56. s->dest[2] += s->current_picture_ptr->f->linesize[2];
  57. }
  58. }
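/* For field pictures both fields share one frame buffer with interleaved
 * lines; the destination pointers are pushed down by one line when the field
 * being decoded is (apparently) the bottom one, i.e. when second_field == tff,
 * so that writes land on that field's rows. */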
  59. /** @} */ //Bitplane group
  60. static void vc1_put_signed_blocks_clamped(VC1Context *v)
  61. {
  62. MpegEncContext *s = &v->s;
  63. int topleft_mb_pos, top_mb_pos;
  64. int stride_y, fieldtx = 0;
  65. int v_dist;
  66. /* The put pixels loop is always one MB row behind the decoding loop,
  67. * because we can only put pixels when overlap filtering is done, and
  68. * for filtering of the bottom edge of a MB, we need the next MB row
  69. * present as well.
  70. * Within the row, the put pixels loop is also one MB col behind the
  71. * decoding loop. The reason for this is again, because for filtering
  72. * of the right MB edge, we need the next MB present. */
  73. if (!s->first_slice_line) {
  74. if (s->mb_x) {
  75. topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
  76. if (v->fcm == ILACE_FRAME)
  77. fieldtx = v->fieldtx_plane[topleft_mb_pos];
  78. stride_y = s->linesize << fieldtx;
  79. v_dist = (16 - fieldtx) >> (fieldtx == 0);
  80. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
  81. s->dest[0] - 16 * s->linesize - 16,
  82. stride_y);
  83. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
  84. s->dest[0] - 16 * s->linesize - 8,
  85. stride_y);
  86. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
  87. s->dest[0] - v_dist * s->linesize - 16,
  88. stride_y);
  89. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
  90. s->dest[0] - v_dist * s->linesize - 8,
  91. stride_y);
  92. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  93. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
  94. s->dest[1] - 8 * s->uvlinesize - 8,
  95. s->uvlinesize);
  96. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
  97. s->dest[2] - 8 * s->uvlinesize - 8,
  98. s->uvlinesize);
  99. }
  100. }
  101. if (s->mb_x == s->mb_width - 1) {
  102. top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
  103. if (v->fcm == ILACE_FRAME)
  104. fieldtx = v->fieldtx_plane[top_mb_pos];
  105. stride_y = s->linesize << fieldtx;
  106. v_dist = fieldtx ? 15 : 8;
  107. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
  108. s->dest[0] - 16 * s->linesize,
  109. stride_y);
  110. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
  111. s->dest[0] - 16 * s->linesize + 8,
  112. stride_y);
  113. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
  114. s->dest[0] - v_dist * s->linesize,
  115. stride_y);
  116. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
  117. s->dest[0] - v_dist * s->linesize + 8,
  118. stride_y);
  119. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  120. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
  121. s->dest[1] - 8 * s->uvlinesize,
  122. s->uvlinesize);
  123. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
  124. s->dest[2] - 8 * s->uvlinesize,
  125. s->uvlinesize);
  126. }
  127. }
  128. }
  129. #define inc_blk_idx(idx) do { \
  130. idx++; \
  131. if (idx >= v->n_allocated_blks) \
  132. idx = 0; \
  133. } while (0)
  134. inc_blk_idx(v->topleft_blk_idx);
  135. inc_blk_idx(v->top_blk_idx);
  136. inc_blk_idx(v->left_blk_idx);
  137. inc_blk_idx(v->cur_blk_idx);
  138. }
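/* topleft/top/left/cur_blk_idx form a small ring buffer over the
 * v->n_allocated_blks saved block sets: inc_blk_idx() advances and wraps each
 * index, so the deferred output above can still reach the coefficients of
 * macroblocks decoded one row/column earlier. */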
  139. /***********************************************************************/
  140. /**
  141. * @name VC-1 Block-level functions
  142. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  143. * @{
  144. */
  145. /**
  146. * @def GET_MQUANT
  147. * @brief Get macroblock-level quantizer scale
  148. */
  149. #define GET_MQUANT() \
  150. if (v->dquantfrm) { \
  151. int edges = 0; \
  152. if (v->dqprofile == DQPROFILE_ALL_MBS) { \
  153. if (v->dqbilevel) { \
  154. mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
  155. } else { \
  156. mqdiff = get_bits(gb, 3); \
  157. if (mqdiff != 7) \
  158. mquant = v->pq + mqdiff; \
  159. else \
  160. mquant = get_bits(gb, 5); \
  161. } \
  162. } \
  163. if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
  164. edges = 1 << v->dqsbedge; \
  165. else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
  166. edges = (3 << v->dqsbedge) % 15; \
  167. else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
  168. edges = 15; \
  169. if ((edges&1) && !s->mb_x) \
  170. mquant = v->altpq; \
  171. if ((edges&2) && s->first_slice_line) \
  172. mquant = v->altpq; \
  173. if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
  174. mquant = v->altpq; \
  175. if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
  176. mquant = v->altpq; \
  177. if (!mquant || mquant > 31) { \
  178. av_log(v->s.avctx, AV_LOG_ERROR, \
  179. "Overriding invalid mquant %d\n", mquant); \
  180. mquant = 1; \
  181. } \
  182. }
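/* The 'edges' bitmask in GET_MQUANT marks which picture edges use ALTPQUANT:
 * bit 0 = left column, bit 1 = top row, bit 2 = right column, bit 3 = bottom
 * row (see the four tests above). DQPROFILE_DOUBLE_EDGES selects two adjacent
 * edges via (3 << dqsbedge) % 15, e.g. dqsbedge == 3 gives
 * (3 << 3) % 15 = 9 = 0b1001, i.e. left + bottom. */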
  183. /**
  184. * @def GET_MVDATA(_dmv_x, _dmv_y)
  185. * @brief Get MV differentials
  186. * @see MVDATA decoding from 8.3.5.2, p(1)20
  187. * @param _dmv_x Horizontal differential for decoded MV
  188. * @param _dmv_y Vertical differential for decoded MV
  189. */
  190. #define GET_MVDATA(_dmv_x, _dmv_y) \
  191. index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
  192. VC1_MV_DIFF_VLC_BITS, 2); \
  193. if (index > 36) { \
  194. mb_has_coeffs = 1; \
  195. index -= 37; \
  196. } else \
  197. mb_has_coeffs = 0; \
  198. s->mb_intra = 0; \
  199. if (!index) { \
  200. _dmv_x = _dmv_y = 0; \
  201. } else if (index == 35) { \
  202. _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
  203. _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
  204. } else if (index == 36) { \
  205. _dmv_x = 0; \
  206. _dmv_y = 0; \
  207. s->mb_intra = 1; \
  208. } else { \
  209. index1 = index % 6; \
  210. _dmv_x = offset_table[1][index1]; \
  211. val = size_table[index1] - (!s->quarter_sample && index1 == 5); \
  212. if (val > 0) { \
  213. val = get_bits(gb, val); \
  214. sign = 0 - (val & 1); \
  215. _dmv_x = (sign ^ ((val >> 1) + _dmv_x)) - sign; \
  216. } \
  217. \
  218. index1 = index / 6; \
  219. _dmv_y = offset_table[1][index1]; \
  220. val = size_table[index1] - (!s->quarter_sample && index1 == 5); \
  221. if (val > 0) { \
  222. val = get_bits(gb, val); \
  223. sign = 0 - (val & 1); \
  224. _dmv_y = (sign ^ ((val >> 1) + _dmv_y)) - sign; \
  225. } \
  226. }
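/* The sign handling in GET_MVDATA (and in get_mvdata_interlaced() below) is a
 * branchless conditional negate: the LSB of the extra bits is the sign, so
 * sign = 0 - (val & 1) is either 0 or -1 (all bits set), and
 * (sign ^ magnitude) - sign leaves the magnitude unchanged for sign == 0 and
 * returns its two's complement for sign == -1. */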
  227. static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
  228. int *dmv_y, int *pred_flag)
  229. {
  230. int index, index1;
  231. int extend_x, extend_y;
  232. GetBitContext *gb = &v->s.gb;
  233. int bits, esc;
  234. int val, sign;
  235. if (v->numref) {
  236. bits = VC1_2REF_MVDATA_VLC_BITS;
  237. esc = 125;
  238. } else {
  239. bits = VC1_1REF_MVDATA_VLC_BITS;
  240. esc = 71;
  241. }
  242. extend_x = v->dmvrange & 1;
  243. extend_y = (v->dmvrange >> 1) & 1;
  244. index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
  245. if (index == esc) {
  246. *dmv_x = get_bits(gb, v->k_x);
  247. *dmv_y = get_bits(gb, v->k_y);
  248. if (v->numref) {
  249. if (pred_flag)
  250. *pred_flag = *dmv_y & 1;
  251. *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
  252. }
  253. }
  254. else {
  255. av_assert0(index < esc);
  256. index1 = (index + 1) % 9;
  257. if (index1 != 0) {
  258. val = get_bits(gb, index1 + extend_x);
  259. sign = 0 - (val & 1);
  260. *dmv_x = (sign ^ ((val >> 1) + offset_table[extend_x][index1])) - sign;
  261. } else
  262. *dmv_x = 0;
  263. index1 = (index + 1) / 9;
  264. if (index1 > v->numref) {
  265. val = get_bits(gb, (index1 >> v->numref) + extend_y);
  266. sign = 0 - (val & 1);
  267. *dmv_y = (sign ^ ((val >> 1) + offset_table[extend_y][index1 >> v->numref])) - sign;
  268. } else
  269. *dmv_y = 0;
  270. if (v->numref && pred_flag)
  271. *pred_flag = index1 & 1;
  272. }
  273. }
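/* Non-escape codes above encode both MV components jointly: (index + 1) % 9
 * gives the horizontal offset/size class and (index + 1) / 9 the vertical one.
 * With two reference fields (numref == 1) the vertical class also carries the
 * reference-field flag in its LSB, hence the >> numref before the table
 * lookup and pred_flag being taken from index1 & 1. */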
  274. /** Reconstruct motion vector for B-frame and do motion compensation
  275. */
  276. static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
  277. int direct, int mode)
  278. {
  279. if (direct) {
  280. ff_vc1_mc_1mv(v, 0);
  281. ff_vc1_interp_mc(v);
  282. return;
  283. }
  284. if (mode == BMV_TYPE_INTERPOLATED) {
  285. ff_vc1_mc_1mv(v, 0);
  286. ff_vc1_interp_mc(v);
  287. return;
  288. }
  289. ff_vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
  290. }
  291. /** Get predicted DC value for I-frames only
292. * prediction dir: left=1, top=0
  293. * @param s MpegEncContext
  294. * @param overlap flag indicating that overlap filtering is used
  295. * @param pq integer part of picture quantizer
  296. * @param[in] n block index in the current MB
  297. * @param dc_val_ptr Pointer to DC predictor
  298. * @param dir_ptr Prediction direction for use in AC prediction
  299. */
  300. static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  301. int16_t **dc_val_ptr, int *dir_ptr)
  302. {
  303. int a, b, c, wrap, pred, scale;
  304. int16_t *dc_val;
  305. static const uint16_t dcpred[32] = {
  306. -1, 1024, 512, 341, 256, 205, 171, 146, 128,
  307. 114, 102, 93, 85, 79, 73, 68, 64,
  308. 60, 57, 54, 51, 49, 47, 45, 43,
  309. 41, 39, 38, 37, 35, 34, 33
  310. };
  311. /* find prediction - wmv3_dc_scale always used here in fact */
  312. if (n < 4) scale = s->y_dc_scale;
  313. else scale = s->c_dc_scale;
  314. wrap = s->block_wrap[n];
  315. dc_val = s->dc_val[0] + s->block_index[n];
  316. /* B A
  317. * C X
  318. */
  319. c = dc_val[ - 1];
  320. b = dc_val[ - 1 - wrap];
  321. a = dc_val[ - wrap];
  322. if (pq < 9 || !overlap) {
  323. /* Set outer values */
  324. if (s->first_slice_line && (n != 2 && n != 3))
  325. b = a = dcpred[scale];
  326. if (s->mb_x == 0 && (n != 1 && n != 3))
  327. b = c = dcpred[scale];
  328. } else {
  329. /* Set outer values */
  330. if (s->first_slice_line && (n != 2 && n != 3))
  331. b = a = 0;
  332. if (s->mb_x == 0 && (n != 1 && n != 3))
  333. b = c = 0;
  334. }
  335. if (abs(a - b) <= abs(b - c)) {
  336. pred = c;
  337. *dir_ptr = 1; // left
  338. } else {
  339. pred = a;
  340. *dir_ptr = 0; // top
  341. }
  342. /* update predictor */
  343. *dc_val_ptr = &dc_val[0];
  344. return pred;
  345. }
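/* dcpred[] is essentially 1024 / scale rounded to the nearest integer: 1024
 * is the transform-domain DC of a flat mid-grey (128) 8x8 block, so border
 * predictors default to "flat grey" expressed in the current DC quantizer
 * step. This default is only used when pq < 9 or overlap smoothing is off;
 * otherwise missing border predictors are simply 0. */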
  346. /** Get predicted DC value
347. * prediction dir: left=1, top=0
  348. * @param s MpegEncContext
  349. * @param overlap flag indicating that overlap filtering is used
  350. * @param pq integer part of picture quantizer
  351. * @param[in] n block index in the current MB
  352. * @param a_avail flag indicating top block availability
  353. * @param c_avail flag indicating left block availability
  354. * @param dc_val_ptr Pointer to DC predictor
  355. * @param dir_ptr Prediction direction for use in AC prediction
  356. */
  357. static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  358. int a_avail, int c_avail,
  359. int16_t **dc_val_ptr, int *dir_ptr)
  360. {
  361. int a, b, c, wrap, pred;
  362. int16_t *dc_val;
  363. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  364. int q1, q2 = 0;
  365. int dqscale_index;
  366. /* scale predictors if needed */
  367. q1 = s->current_picture.qscale_table[mb_pos];
  368. dqscale_index = s->y_dc_scale_table[q1] - 1;
  369. if (dqscale_index < 0)
  370. return 0;
  371. wrap = s->block_wrap[n];
  372. dc_val = s->dc_val[0] + s->block_index[n];
  373. /* B A
  374. * C X
  375. */
  376. c = dc_val[ - 1];
  377. b = dc_val[ - 1 - wrap];
  378. a = dc_val[ - wrap];
  379. if (c_avail && (n != 1 && n != 3)) {
  380. q2 = s->current_picture.qscale_table[mb_pos - 1];
  381. if (q2 && q2 != q1)
  382. c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  383. }
  384. if (a_avail && (n != 2 && n != 3)) {
  385. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  386. if (q2 && q2 != q1)
  387. a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  388. }
  389. if (a_avail && c_avail && (n != 3)) {
  390. int off = mb_pos;
  391. if (n != 1)
  392. off--;
  393. if (n != 2)
  394. off -= s->mb_stride;
  395. q2 = s->current_picture.qscale_table[off];
  396. if (q2 && q2 != q1)
  397. b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  398. }
  399. if (c_avail && (!a_avail || abs(a - b) <= abs(b - c))) {
  400. pred = c;
  401. *dir_ptr = 1; // left
  402. } else if (a_avail) {
  403. pred = a;
  404. *dir_ptr = 0; // top
  405. } else {
  406. pred = 0;
  407. *dir_ptr = 1; // left
  408. }
  409. /* update predictor */
  410. *dc_val_ptr = &dc_val[0];
  411. return pred;
  412. }
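/* Neighbouring DC values were stored in their own macroblock's quantizer
 * scale, so they are rescaled to the current one as
 * value * y_dc_scale_table[q2] / y_dc_scale_table[q1]; judging from the
 * 0x20000 rounding term and the >> 18, ff_vc1_dqscale[] holds the
 * corresponding ~(1 << 18) / (i + 1) fixed-point reciprocals. */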
  413. /** @} */ // Block group
  414. /**
  415. * @name VC1 Macroblock-level functions in Simple/Main Profiles
  416. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  417. * @{
  418. */
  419. static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
  420. uint8_t **coded_block_ptr)
  421. {
  422. int xy, wrap, pred, a, b, c;
  423. xy = s->block_index[n];
  424. wrap = s->b8_stride;
  425. /* B C
  426. * A X
  427. */
  428. a = s->coded_block[xy - 1 ];
  429. b = s->coded_block[xy - 1 - wrap];
  430. c = s->coded_block[xy - wrap];
  431. if (b == c) {
  432. pred = a;
  433. } else {
  434. pred = c;
  435. }
  436. /* store value */
  437. *coded_block_ptr = &s->coded_block[xy];
  438. return pred;
  439. }
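/* Gradient prediction on the coded-block flags: with the layout
 *   B C
 *   A X
 * the predictor for X is the left flag A when the two top flags agree
 * (B == C), otherwise the top flag C; callers use the returned prediction to
 * reconstruct the actual coded flag. */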
  440. /**
  441. * Decode one AC coefficient
  442. * @param v The VC1 context
  443. * @param last Last coefficient
444. * @param skip How many zero coefficients to skip
  445. * @param value Decoded AC coefficient value
  446. * @param codingset set of VLC to decode data
  447. * @see 8.1.3.4
  448. */
  449. static int vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
  450. int *value, int codingset)
  451. {
  452. GetBitContext *gb = &v->s.gb;
  453. int index, run, level, lst, sign;
  454. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  455. if (index < 0)
  456. return index;
  457. if (index != ff_vc1_ac_sizes[codingset] - 1) {
  458. run = vc1_index_decode_table[codingset][index][0];
  459. level = vc1_index_decode_table[codingset][index][1];
  460. lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
  461. sign = get_bits1(gb);
  462. } else {
  463. int escape = decode210(gb);
  464. if (escape != 2) {
  465. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  466. run = vc1_index_decode_table[codingset][index][0];
  467. level = vc1_index_decode_table[codingset][index][1];
  468. lst = index >= vc1_last_decode_table[codingset];
  469. if (escape == 0) {
  470. if (lst)
  471. level += vc1_last_delta_level_table[codingset][run];
  472. else
  473. level += vc1_delta_level_table[codingset][run];
  474. } else {
  475. if (lst)
  476. run += vc1_last_delta_run_table[codingset][level] + 1;
  477. else
  478. run += vc1_delta_run_table[codingset][level] + 1;
  479. }
  480. sign = get_bits1(gb);
  481. } else {
  482. lst = get_bits1(gb);
  483. if (v->s.esc3_level_length == 0) {
  484. if (v->pq < 8 || v->dquantfrm) { // table 59
  485. v->s.esc3_level_length = get_bits(gb, 3);
  486. if (!v->s.esc3_level_length)
  487. v->s.esc3_level_length = get_bits(gb, 2) + 8;
  488. } else { // table 60
  489. v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
  490. }
  491. v->s.esc3_run_length = 3 + get_bits(gb, 2);
  492. }
  493. run = get_bits(gb, v->s.esc3_run_length);
  494. sign = get_bits1(gb);
  495. level = get_bits(gb, v->s.esc3_level_length);
  496. }
  497. }
  498. *last = lst;
  499. *skip = run;
  500. *value = (level ^ -sign) + sign;
  501. return 0;
  502. }
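/* Escape handling above: on the last VLC index, decode210() picks one of
 * three modes. Modes 0 and 1 reuse a regular run/level code and add a
 * table-driven delta to the level (mode 0) or to the run (mode 1); mode 2 is
 * the fully explicit escape, reading run and level with the adaptive widths
 * esc3_run_length/esc3_level_length that are parsed on their first use
 * (tables 59/60). The final (level ^ -sign) + sign is the same branchless
 * conditional negate used for MV decoding. */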
  503. /** Decode intra block in intra frames - should be faster than decode_intra_block
  504. * @param v VC1Context
  505. * @param block block to decode
  506. * @param[in] n subblock index
507. * @param coded whether AC coefficients are present
  508. * @param codingset set of VLC to decode data
  509. */
  510. static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
  511. int coded, int codingset)
  512. {
  513. GetBitContext *gb = &v->s.gb;
  514. MpegEncContext *s = &v->s;
  515. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  516. int i;
  517. int16_t *dc_val;
  518. int16_t *ac_val, *ac_val2;
  519. int dcdiff, scale;
  520. /* Get DC differential */
  521. if (n < 4) {
  522. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  523. } else {
  524. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  525. }
  526. if (dcdiff < 0) {
  527. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  528. return -1;
  529. }
  530. if (dcdiff) {
  531. const int m = (v->pq == 1 || v->pq == 2) ? 3 - v->pq : 0;
  532. if (dcdiff == 119 /* ESC index value */) {
  533. dcdiff = get_bits(gb, 8 + m);
  534. } else {
  535. if (m)
  536. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  537. }
  538. if (get_bits1(gb))
  539. dcdiff = -dcdiff;
  540. }
  541. /* Prediction */
  542. dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
  543. *dc_val = dcdiff;
  544. /* Store the quantized DC coeff, used for prediction */
  545. if (n < 4)
  546. scale = s->y_dc_scale;
  547. else
  548. scale = s->c_dc_scale;
  549. block[0] = dcdiff * scale;
  550. ac_val = s->ac_val[0][s->block_index[n]];
  551. ac_val2 = ac_val;
  552. if (dc_pred_dir) // left
  553. ac_val -= 16;
  554. else // top
  555. ac_val -= 16 * s->block_wrap[n];
  556. scale = v->pq * 2 + v->halfpq;
  557. //AC Decoding
  558. i = !!coded;
  559. if (coded) {
  560. int last = 0, skip, value;
  561. const uint8_t *zz_table;
  562. int k;
  563. if (v->s.ac_pred) {
  564. if (!dc_pred_dir)
  565. zz_table = v->zz_8x8[2];
  566. else
  567. zz_table = v->zz_8x8[3];
  568. } else
  569. zz_table = v->zz_8x8[1];
  570. while (!last) {
  571. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  572. if (ret < 0)
  573. return ret;
  574. i += skip;
  575. if (i > 63)
  576. break;
  577. block[zz_table[i++]] = value;
  578. }
  579. /* apply AC prediction if needed */
  580. if (s->ac_pred) {
  581. int sh;
  582. if (dc_pred_dir) { // left
  583. sh = v->left_blk_sh;
  584. } else { // top
  585. sh = v->top_blk_sh;
  586. ac_val += 8;
  587. }
  588. for (k = 1; k < 8; k++)
  589. block[k << sh] += ac_val[k];
  590. }
  591. /* save AC coeffs for further prediction */
  592. for (k = 1; k < 8; k++) {
  593. ac_val2[k] = block[k << v->left_blk_sh];
  594. ac_val2[k + 8] = block[k << v->top_blk_sh];
  595. }
  596. /* scale AC coeffs */
  597. for (k = 1; k < 64; k++)
  598. if (block[k]) {
  599. block[k] *= scale;
  600. if (!v->pquantizer)
  601. block[k] += (block[k] < 0) ? -v->pq : v->pq;
  602. }
  603. } else {
  604. int k;
  605. memset(ac_val2, 0, 16 * 2);
  606. /* apply AC prediction if needed */
  607. if (s->ac_pred) {
  608. int sh;
  609. if (dc_pred_dir) { //left
  610. sh = v->left_blk_sh;
  611. } else { // top
  612. sh = v->top_blk_sh;
  613. ac_val += 8;
  614. ac_val2 += 8;
  615. }
  616. memcpy(ac_val2, ac_val, 8 * 2);
  617. for (k = 1; k < 8; k++) {
  618. block[k << sh] = ac_val[k] * scale;
  619. if (!v->pquantizer && block[k << sh])
  620. block[k << sh] += (block[k << sh] < 0) ? -v->pq : v->pq;
  621. }
  622. }
  623. }
  624. if (s->ac_pred) i = 63;
  625. s->block_last_index[n] = i;
  626. return 0;
  627. }
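/* AC predictor storage used above: every block owns 16 entries in
 * s->ac_val[0]. Entries 1..7 hold block[k << left_blk_sh] (what a block to the
 * right will add when predicting "from the left"); entries 9..15 hold
 * block[k << top_blk_sh] (used for prediction "from the top"). Stepping the
 * pointer back by 16 reaches the left neighbour, back by 16 * block_wrap[n]
 * the neighbour above, and the extra +8 selects its top-prediction half. */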
  628. /** Decode intra block in intra frames - should be faster than decode_intra_block
  629. * @param v VC1Context
  630. * @param block block to decode
  631. * @param[in] n subblock number
632. * @param coded whether AC coefficients are present
  633. * @param codingset set of VLC to decode data
  634. * @param mquant quantizer value for this macroblock
  635. */
  636. static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
  637. int coded, int codingset, int mquant)
  638. {
  639. GetBitContext *gb = &v->s.gb;
  640. MpegEncContext *s = &v->s;
  641. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  642. int i;
  643. int16_t *dc_val = NULL;
  644. int16_t *ac_val, *ac_val2;
  645. int dcdiff;
  646. int a_avail = v->a_avail, c_avail = v->c_avail;
  647. int use_pred = s->ac_pred;
  648. int scale;
  649. int q1, q2 = 0;
  650. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  651. /* Get DC differential */
  652. if (n < 4) {
  653. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  654. } else {
  655. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  656. }
  657. if (dcdiff < 0) {
  658. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  659. return -1;
  660. }
  661. if (dcdiff) {
  662. const int m = (mquant == 1 || mquant == 2) ? 3 - mquant : 0;
  663. if (dcdiff == 119 /* ESC index value */) {
  664. dcdiff = get_bits(gb, 8 + m);
  665. } else {
  666. if (m)
  667. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  668. }
  669. if (get_bits1(gb))
  670. dcdiff = -dcdiff;
  671. }
  672. /* Prediction */
  673. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
  674. *dc_val = dcdiff;
  675. /* Store the quantized DC coeff, used for prediction */
  676. if (n < 4)
  677. scale = s->y_dc_scale;
  678. else
  679. scale = s->c_dc_scale;
  680. block[0] = dcdiff * scale;
  681. /* check if AC is needed at all */
  682. if (!a_avail && !c_avail)
  683. use_pred = 0;
  684. scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
  685. ac_val = s->ac_val[0][s->block_index[n]];
  686. ac_val2 = ac_val;
  687. if (dc_pred_dir) // left
  688. ac_val -= 16;
  689. else // top
  690. ac_val -= 16 * s->block_wrap[n];
  691. q1 = s->current_picture.qscale_table[mb_pos];
  692. if (n == 3)
  693. q2 = q1;
  694. else if (dc_pred_dir) {
  695. if (n == 1)
  696. q2 = q1;
  697. else if (c_avail && mb_pos)
  698. q2 = s->current_picture.qscale_table[mb_pos - 1];
  699. } else {
  700. if (n == 2)
  701. q2 = q1;
  702. else if (a_avail && mb_pos >= s->mb_stride)
  703. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  704. }
  705. //AC Decoding
  706. i = 1;
  707. if (coded) {
  708. int last = 0, skip, value;
  709. const uint8_t *zz_table;
  710. int k;
  711. if (v->s.ac_pred) {
  712. if (!use_pred && v->fcm == ILACE_FRAME) {
  713. zz_table = v->zzi_8x8;
  714. } else {
  715. if (!dc_pred_dir) // top
  716. zz_table = v->zz_8x8[2];
  717. else // left
  718. zz_table = v->zz_8x8[3];
  719. }
  720. } else {
  721. if (v->fcm != ILACE_FRAME)
  722. zz_table = v->zz_8x8[1];
  723. else
  724. zz_table = v->zzi_8x8;
  725. }
  726. while (!last) {
  727. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  728. if (ret < 0)
  729. return ret;
  730. i += skip;
  731. if (i > 63)
  732. break;
  733. block[zz_table[i++]] = value;
  734. }
  735. /* apply AC prediction if needed */
  736. if (use_pred) {
  737. int sh;
  738. if (dc_pred_dir) { // left
  739. sh = v->left_blk_sh;
  740. } else { // top
  741. sh = v->top_blk_sh;
  742. ac_val += 8;
  743. }
  744. /* scale predictors if needed*/
  745. if (q2 && q1 != q2) {
  746. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  747. if (q1 < 1)
  748. return AVERROR_INVALIDDATA;
  749. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  750. for (k = 1; k < 8; k++)
  751. block[k << sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  752. } else {
  753. for (k = 1; k < 8; k++)
  754. block[k << sh] += ac_val[k];
  755. }
  756. }
  757. /* save AC coeffs for further prediction */
  758. for (k = 1; k < 8; k++) {
  759. ac_val2[k ] = block[k << v->left_blk_sh];
  760. ac_val2[k + 8] = block[k << v->top_blk_sh];
  761. }
  762. /* scale AC coeffs */
  763. for (k = 1; k < 64; k++)
  764. if (block[k]) {
  765. block[k] *= scale;
  766. if (!v->pquantizer)
  767. block[k] += (block[k] < 0) ? -mquant : mquant;
  768. }
  769. } else { // no AC coeffs
  770. int k;
  771. memset(ac_val2, 0, 16 * 2);
  772. /* apply AC prediction if needed */
  773. if (use_pred) {
  774. int sh;
  775. if (dc_pred_dir) { // left
  776. sh = v->left_blk_sh;
  777. } else { // top
  778. sh = v->top_blk_sh;
  779. ac_val += 8;
  780. ac_val2 += 8;
  781. }
  782. memcpy(ac_val2, ac_val, 8 * 2);
  783. if (q2 && q1 != q2) {
  784. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  785. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  786. if (q1 < 1)
  787. return AVERROR_INVALIDDATA;
  788. for (k = 1; k < 8; k++)
  789. ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  790. }
  791. for (k = 1; k < 8; k++) {
  792. block[k << sh] = ac_val2[k] * scale;
  793. if (!v->pquantizer && block[k << sh])
  794. block[k << sh] += (block[k << sh] < 0) ? -mquant : mquant;
  795. }
  796. }
  797. }
  798. if (use_pred) i = 63;
  799. s->block_last_index[n] = i;
  800. return 0;
  801. }
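/* q1/q2 above: q2 is the quantizer of the macroblock that owns the predictor
 * block. For blocks whose predictor lies inside the same macroblock (n == 1
 * when predicting from the left, n == 2 when predicting from the top, n == 3
 * always) q2 == q1 and the predictor rescaling step is skipped. */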
  802. /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
  803. * @param v VC1Context
  804. * @param block block to decode
  805. * @param[in] n subblock index
806. * @param coded whether AC coefficients are present
  807. * @param mquant block quantizer
  808. * @param codingset set of VLC to decode data
  809. */
  810. static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
  811. int coded, int mquant, int codingset)
  812. {
  813. GetBitContext *gb = &v->s.gb;
  814. MpegEncContext *s = &v->s;
  815. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  816. int i;
  817. int16_t *dc_val = NULL;
  818. int16_t *ac_val, *ac_val2;
  819. int dcdiff;
  820. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  821. int a_avail = v->a_avail, c_avail = v->c_avail;
  822. int use_pred = s->ac_pred;
  823. int scale;
  824. int q1, q2 = 0;
  825. s->bdsp.clear_block(block);
  826. /* XXX: Guard against dumb values of mquant */
  827. mquant = av_clip_uintp2(mquant, 5);
  828. /* Set DC scale - y and c use the same */
  829. s->y_dc_scale = s->y_dc_scale_table[mquant];
  830. s->c_dc_scale = s->c_dc_scale_table[mquant];
  831. /* Get DC differential */
  832. if (n < 4) {
  833. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  834. } else {
  835. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  836. }
  837. if (dcdiff < 0) {
  838. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  839. return -1;
  840. }
  841. if (dcdiff) {
  842. const int m = (mquant == 1 || mquant == 2) ? 3 - mquant : 0;
  843. if (dcdiff == 119 /* ESC index value */) {
  844. dcdiff = get_bits(gb, 8 + m);
  845. } else {
  846. if (m)
  847. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  848. }
  849. if (get_bits1(gb))
  850. dcdiff = -dcdiff;
  851. }
  852. /* Prediction */
  853. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
  854. *dc_val = dcdiff;
  855. /* Store the quantized DC coeff, used for prediction */
  856. if (n < 4) {
  857. block[0] = dcdiff * s->y_dc_scale;
  858. } else {
  859. block[0] = dcdiff * s->c_dc_scale;
  860. }
  861. //AC Decoding
  862. i = 1;
  863. /* check if AC is needed at all and adjust direction if needed */
  864. if (!a_avail) dc_pred_dir = 1;
  865. if (!c_avail) dc_pred_dir = 0;
  866. if (!a_avail && !c_avail) use_pred = 0;
  867. ac_val = s->ac_val[0][s->block_index[n]];
  868. ac_val2 = ac_val;
  869. scale = mquant * 2 + v->halfpq;
  870. if (dc_pred_dir) //left
  871. ac_val -= 16;
  872. else //top
  873. ac_val -= 16 * s->block_wrap[n];
  874. q1 = s->current_picture.qscale_table[mb_pos];
  875. if (dc_pred_dir && c_avail && mb_pos)
  876. q2 = s->current_picture.qscale_table[mb_pos - 1];
  877. if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
  878. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  879. if (dc_pred_dir && n == 1)
  880. q2 = q1;
  881. if (!dc_pred_dir && n == 2)
  882. q2 = q1;
  883. if (n == 3) q2 = q1;
  884. if (coded) {
  885. int last = 0, skip, value;
  886. int k;
  887. while (!last) {
  888. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  889. if (ret < 0)
  890. return ret;
  891. i += skip;
  892. if (i > 63)
  893. break;
  894. if (v->fcm == PROGRESSIVE)
  895. block[v->zz_8x8[0][i++]] = value;
  896. else {
  897. if (use_pred && (v->fcm == ILACE_FRAME)) {
  898. if (!dc_pred_dir) // top
  899. block[v->zz_8x8[2][i++]] = value;
  900. else // left
  901. block[v->zz_8x8[3][i++]] = value;
  902. } else {
  903. block[v->zzi_8x8[i++]] = value;
  904. }
  905. }
  906. }
  907. /* apply AC prediction if needed */
  908. if (use_pred) {
  909. /* scale predictors if needed*/
  910. if (q2 && q1 != q2) {
  911. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  912. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  913. if (q1 < 1)
  914. return AVERROR_INVALIDDATA;
  915. if (dc_pred_dir) { // left
  916. for (k = 1; k < 8; k++)
  917. block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  918. } else { //top
  919. for (k = 1; k < 8; k++)
  920. block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  921. }
  922. } else {
  923. if (dc_pred_dir) { // left
  924. for (k = 1; k < 8; k++)
  925. block[k << v->left_blk_sh] += ac_val[k];
  926. } else { // top
  927. for (k = 1; k < 8; k++)
  928. block[k << v->top_blk_sh] += ac_val[k + 8];
  929. }
  930. }
  931. }
  932. /* save AC coeffs for further prediction */
  933. for (k = 1; k < 8; k++) {
  934. ac_val2[k ] = block[k << v->left_blk_sh];
  935. ac_val2[k + 8] = block[k << v->top_blk_sh];
  936. }
  937. /* scale AC coeffs */
  938. for (k = 1; k < 64; k++)
  939. if (block[k]) {
  940. block[k] *= scale;
  941. if (!v->pquantizer)
  942. block[k] += (block[k] < 0) ? -mquant : mquant;
  943. }
  944. if (use_pred) i = 63;
  945. } else { // no AC coeffs
  946. int k;
  947. memset(ac_val2, 0, 16 * 2);
  948. if (dc_pred_dir) { // left
  949. if (use_pred) {
  950. memcpy(ac_val2, ac_val, 8 * 2);
  951. if (q2 && q1 != q2) {
  952. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  953. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  954. if (q1 < 1)
  955. return AVERROR_INVALIDDATA;
  956. for (k = 1; k < 8; k++)
  957. ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  958. }
  959. }
  960. } else { // top
  961. if (use_pred) {
  962. memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
  963. if (q2 && q1 != q2) {
  964. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  965. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  966. if (q1 < 1)
  967. return AVERROR_INVALIDDATA;
  968. for (k = 1; k < 8; k++)
  969. ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  970. }
  971. }
  972. }
  973. /* apply AC prediction if needed */
  974. if (use_pred) {
  975. if (dc_pred_dir) { // left
  976. for (k = 1; k < 8; k++) {
  977. block[k << v->left_blk_sh] = ac_val2[k] * scale;
  978. if (!v->pquantizer && block[k << v->left_blk_sh])
  979. block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
  980. }
  981. } else { // top
  982. for (k = 1; k < 8; k++) {
  983. block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
  984. if (!v->pquantizer && block[k << v->top_blk_sh])
  985. block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
  986. }
  987. }
  988. i = 63;
  989. }
  990. }
  991. s->block_last_index[n] = i;
  992. return 0;
  993. }
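/* Unlike vc1_decode_i_block_adv(), this variant handles intra blocks inside
 * P/B macroblocks, so it clears the block itself, clamps mquant and forces
 * the prediction direction towards whichever neighbour actually exists
 * (dc_pred_dir is overridden when a_avail or c_avail is 0). */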
  994. /** Decode P block
  995. */
  996. static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
  997. int mquant, int ttmb, int first_block,
  998. uint8_t *dst, int linesize, int skip_block,
  999. int *ttmb_out)
  1000. {
  1001. MpegEncContext *s = &v->s;
  1002. GetBitContext *gb = &s->gb;
  1003. int i, j;
  1004. int subblkpat = 0;
  1005. int scale, off, idx, last, skip, value;
  1006. int ttblk = ttmb & 7;
  1007. int pat = 0;
  1008. s->bdsp.clear_block(block);
  1009. if (ttmb == -1) {
  1010. ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
  1011. }
  1012. if (ttblk == TT_4X4) {
  1013. subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
  1014. }
  1015. if ((ttblk != TT_8X8 && ttblk != TT_4X4)
  1016. && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
  1017. || (!v->res_rtm_flag && !first_block))) {
  1018. subblkpat = decode012(gb);
  1019. if (subblkpat)
  1020. subblkpat ^= 3; // swap decoded pattern bits
  1021. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
  1022. ttblk = TT_8X4;
  1023. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
  1024. ttblk = TT_4X8;
  1025. }
  1026. scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
  1027. // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
  1028. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
  1029. subblkpat = 2 - (ttblk == TT_8X4_TOP);
  1030. ttblk = TT_8X4;
  1031. }
  1032. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
  1033. subblkpat = 2 - (ttblk == TT_4X8_LEFT);
  1034. ttblk = TT_4X8;
  1035. }
  1036. switch (ttblk) {
  1037. case TT_8X8:
  1038. pat = 0xF;
  1039. i = 0;
  1040. last = 0;
  1041. while (!last) {
  1042. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1043. if (ret < 0)
  1044. return ret;
  1045. i += skip;
  1046. if (i > 63)
  1047. break;
  1048. if (!v->fcm)
  1049. idx = v->zz_8x8[0][i++];
  1050. else
  1051. idx = v->zzi_8x8[i++];
  1052. block[idx] = value * scale;
  1053. if (!v->pquantizer)
  1054. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1055. }
  1056. if (!skip_block) {
  1057. if (i == 1)
  1058. v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
  1059. else {
  1060. v->vc1dsp.vc1_inv_trans_8x8(block);
  1061. s->idsp.add_pixels_clamped(block, dst, linesize);
  1062. }
  1063. }
  1064. break;
  1065. case TT_4X4:
  1066. pat = ~subblkpat & 0xF;
  1067. for (j = 0; j < 4; j++) {
  1068. last = subblkpat & (1 << (3 - j));
  1069. i = 0;
  1070. off = (j & 1) * 4 + (j & 2) * 16;
  1071. while (!last) {
  1072. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1073. if (ret < 0)
  1074. return ret;
  1075. i += skip;
  1076. if (i > 15)
  1077. break;
  1078. if (!v->fcm)
  1079. idx = ff_vc1_simple_progressive_4x4_zz[i++];
  1080. else
  1081. idx = ff_vc1_adv_interlaced_4x4_zz[i++];
  1082. block[idx + off] = value * scale;
  1083. if (!v->pquantizer)
  1084. block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
  1085. }
  1086. if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
  1087. if (i == 1)
  1088. v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1089. else
  1090. v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1091. }
  1092. }
  1093. break;
  1094. case TT_8X4:
  1095. pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
  1096. for (j = 0; j < 2; j++) {
  1097. last = subblkpat & (1 << (1 - j));
  1098. i = 0;
  1099. off = j * 32;
  1100. while (!last) {
  1101. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1102. if (ret < 0)
  1103. return ret;
  1104. i += skip;
  1105. if (i > 31)
  1106. break;
  1107. if (!v->fcm)
  1108. idx = v->zz_8x4[i++] + off;
  1109. else
  1110. idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
  1111. block[idx] = value * scale;
  1112. if (!v->pquantizer)
  1113. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1114. }
  1115. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1116. if (i == 1)
  1117. v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
  1118. else
  1119. v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
  1120. }
  1121. }
  1122. break;
  1123. case TT_4X8:
  1124. pat = ~(subblkpat * 5) & 0xF;
  1125. for (j = 0; j < 2; j++) {
  1126. last = subblkpat & (1 << (1 - j));
  1127. i = 0;
  1128. off = j * 4;
  1129. while (!last) {
  1130. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1131. if (ret < 0)
  1132. return ret;
  1133. i += skip;
  1134. if (i > 31)
  1135. break;
  1136. if (!v->fcm)
  1137. idx = v->zz_4x8[i++] + off;
  1138. else
  1139. idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
  1140. block[idx] = value * scale;
  1141. if (!v->pquantizer)
  1142. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1143. }
  1144. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1145. if (i == 1)
  1146. v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
  1147. else
  1148. v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
  1149. }
  1150. }
  1151. break;
  1152. }
  1153. if (ttmb_out)
  1154. *ttmb_out |= ttblk << (n * 4);
  1155. return pat;
  1156. }
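/* vc1_decode_p_block() returns a 4-bit pattern with one bit per 4x4 quadrant
 * that actually received coefficients; the P-MB decoders below accumulate it
 * as pat << (i << 2) into block_cbp and the chosen transform type into
 * *ttmb_out, and store both per macroblock in v->cbp and v->ttblk. */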
  1157. /** @} */ // Macroblock group
  1158. static const uint8_t size_table[6] = { 0, 2, 3, 4, 5, 8 };
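/* size_table[] gives the number of extra magnitude bits following a
 * non-escape MV code of each class; it pairs with offset_table[1] at the top
 * of this file in GET_MVDATA (class 5 loses one bit in half-pel mode, see the
 * "!s->quarter_sample && index1 == 5" adjustment there). */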
  1159. /** Decode one P-frame MB
  1160. */
  1161. static int vc1_decode_p_mb(VC1Context *v)
  1162. {
  1163. MpegEncContext *s = &v->s;
  1164. GetBitContext *gb = &s->gb;
  1165. int i, j;
  1166. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1167. int cbp; /* cbp decoding stuff */
  1168. int mqdiff, mquant; /* MB quantization */
  1169. int ttmb = v->ttfrm; /* MB Transform type */
  1170. int mb_has_coeffs = 1; /* last_flag */
  1171. int dmv_x, dmv_y; /* Differential MV components */
  1172. int index, index1; /* LUT indexes */
  1173. int val, sign; /* temp values */
  1174. int first_block = 1;
  1175. int dst_idx, off;
  1176. int skipped, fourmv;
  1177. int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
  1178. mquant = v->pq; /* lossy initialization */
  1179. if (v->mv_type_is_raw)
  1180. fourmv = get_bits1(gb);
  1181. else
  1182. fourmv = v->mv_type_mb_plane[mb_pos];
  1183. if (v->skip_is_raw)
  1184. skipped = get_bits1(gb);
  1185. else
  1186. skipped = v->s.mbskip_table[mb_pos];
  1187. if (!fourmv) { /* 1MV mode */
  1188. if (!skipped) {
  1189. GET_MVDATA(dmv_x, dmv_y);
  1190. if (s->mb_intra) {
  1191. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1192. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1193. }
  1194. s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
  1195. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1196. /* FIXME Set DC val for inter block ? */
  1197. if (s->mb_intra && !mb_has_coeffs) {
  1198. GET_MQUANT();
  1199. s->ac_pred = get_bits1(gb);
  1200. cbp = 0;
  1201. } else if (mb_has_coeffs) {
  1202. if (s->mb_intra)
  1203. s->ac_pred = get_bits1(gb);
  1204. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1205. GET_MQUANT();
  1206. } else {
  1207. mquant = v->pq;
  1208. cbp = 0;
  1209. }
  1210. s->current_picture.qscale_table[mb_pos] = mquant;
  1211. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1212. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
  1213. VC1_TTMB_VLC_BITS, 2);
  1214. if (!s->mb_intra) ff_vc1_mc_1mv(v, 0);
  1215. dst_idx = 0;
  1216. for (i = 0; i < 6; i++) {
  1217. s->dc_val[0][s->block_index[i]] = 0;
  1218. dst_idx += i >> 2;
  1219. val = ((cbp >> (5 - i)) & 1);
  1220. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1221. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1222. if (s->mb_intra) {
  1223. /* check if prediction blocks A and C are available */
  1224. v->a_avail = v->c_avail = 0;
  1225. if (i == 2 || i == 3 || !s->first_slice_line)
  1226. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1227. if (i == 1 || i == 3 || s->mb_x)
  1228. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1229. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1230. (i & 4) ? v->codingset2 : v->codingset);
  1231. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1232. continue;
  1233. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1234. if (v->rangeredfrm)
  1235. for (j = 0; j < 64; j++)
  1236. s->block[i][j] <<= 1;
  1237. s->idsp.put_signed_pixels_clamped(s->block[i],
  1238. s->dest[dst_idx] + off,
  1239. i & 4 ? s->uvlinesize
  1240. : s->linesize);
  1241. if (v->pq >= 9 && v->overlap) {
  1242. if (v->c_avail)
  1243. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1244. if (v->a_avail)
  1245. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1246. }
  1247. block_cbp |= 0xF << (i << 2);
  1248. block_intra |= 1 << i;
  1249. } else if (val) {
  1250. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
  1251. s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
  1252. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1253. block_cbp |= pat << (i << 2);
  1254. if (!v->ttmbf && ttmb < 8)
  1255. ttmb = -1;
  1256. first_block = 0;
  1257. }
  1258. }
  1259. } else { // skipped
  1260. s->mb_intra = 0;
  1261. for (i = 0; i < 6; i++) {
  1262. v->mb_type[0][s->block_index[i]] = 0;
  1263. s->dc_val[0][s->block_index[i]] = 0;
  1264. }
  1265. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1266. s->current_picture.qscale_table[mb_pos] = 0;
  1267. ff_vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1268. ff_vc1_mc_1mv(v, 0);
  1269. }
  1270. } else { // 4MV mode
  1271. if (!skipped /* unskipped MB */) {
  1272. int intra_count = 0, coded_inter = 0;
  1273. int is_intra[6], is_coded[6];
  1274. /* Get CBPCY */
  1275. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1276. for (i = 0; i < 6; i++) {
  1277. val = ((cbp >> (5 - i)) & 1);
  1278. s->dc_val[0][s->block_index[i]] = 0;
  1279. s->mb_intra = 0;
  1280. if (i < 4) {
  1281. dmv_x = dmv_y = 0;
  1282. s->mb_intra = 0;
  1283. mb_has_coeffs = 0;
  1284. if (val) {
  1285. GET_MVDATA(dmv_x, dmv_y);
  1286. }
  1287. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1288. if (!s->mb_intra)
  1289. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1290. intra_count += s->mb_intra;
  1291. is_intra[i] = s->mb_intra;
  1292. is_coded[i] = mb_has_coeffs;
  1293. }
  1294. if (i & 4) {
  1295. is_intra[i] = (intra_count >= 3);
  1296. is_coded[i] = val;
  1297. }
  1298. if (i == 4)
  1299. ff_vc1_mc_4mv_chroma(v, 0);
  1300. v->mb_type[0][s->block_index[i]] = is_intra[i];
  1301. if (!coded_inter)
  1302. coded_inter = !is_intra[i] & is_coded[i];
  1303. }
  1304. // if there are no coded blocks then don't do anything more
  1305. dst_idx = 0;
  1306. if (!intra_count && !coded_inter)
  1307. goto end;
  1308. GET_MQUANT();
  1309. s->current_picture.qscale_table[mb_pos] = mquant;
  1310. /* test if block is intra and has pred */
  1311. {
  1312. int intrapred = 0;
  1313. for (i = 0; i < 6; i++)
  1314. if (is_intra[i]) {
  1315. if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
  1316. || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
  1317. intrapred = 1;
  1318. break;
  1319. }
  1320. }
  1321. if (intrapred)
  1322. s->ac_pred = get_bits1(gb);
  1323. else
  1324. s->ac_pred = 0;
  1325. }
  1326. if (!v->ttmbf && coded_inter)
  1327. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1328. for (i = 0; i < 6; i++) {
  1329. dst_idx += i >> 2;
  1330. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1331. s->mb_intra = is_intra[i];
  1332. if (is_intra[i]) {
  1333. /* check if prediction blocks A and C are available */
  1334. v->a_avail = v->c_avail = 0;
  1335. if (i == 2 || i == 3 || !s->first_slice_line)
  1336. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1337. if (i == 1 || i == 3 || s->mb_x)
  1338. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1339. vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
  1340. (i & 4) ? v->codingset2 : v->codingset);
  1341. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1342. continue;
  1343. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1344. if (v->rangeredfrm)
  1345. for (j = 0; j < 64; j++)
  1346. s->block[i][j] <<= 1;
  1347. s->idsp.put_signed_pixels_clamped(s->block[i],
  1348. s->dest[dst_idx] + off,
  1349. (i & 4) ? s->uvlinesize
  1350. : s->linesize);
  1351. if (v->pq >= 9 && v->overlap) {
  1352. if (v->c_avail)
  1353. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1354. if (v->a_avail)
  1355. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1356. }
  1357. block_cbp |= 0xF << (i << 2);
  1358. block_intra |= 1 << i;
  1359. } else if (is_coded[i]) {
  1360. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1361. first_block, s->dest[dst_idx] + off,
  1362. (i & 4) ? s->uvlinesize : s->linesize,
  1363. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1364. &block_tt);
  1365. block_cbp |= pat << (i << 2);
  1366. if (!v->ttmbf && ttmb < 8)
  1367. ttmb = -1;
  1368. first_block = 0;
  1369. }
  1370. }
  1371. } else { // skipped MB
  1372. s->mb_intra = 0;
  1373. s->current_picture.qscale_table[mb_pos] = 0;
  1374. for (i = 0; i < 6; i++) {
  1375. v->mb_type[0][s->block_index[i]] = 0;
  1376. s->dc_val[0][s->block_index[i]] = 0;
  1377. }
  1378. for (i = 0; i < 4; i++) {
  1379. ff_vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1380. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1381. }
  1382. ff_vc1_mc_4mv_chroma(v, 0);
  1383. s->current_picture.qscale_table[mb_pos] = 0;
  1384. }
  1385. }
  1386. end:
  1387. v->cbp[s->mb_x] = block_cbp;
  1388. v->ttblk[s->mb_x] = block_tt;
  1389. v->is_intra[s->mb_x] = block_intra;
  1390. return 0;
  1391. }
  1392. /* Decode one macroblock in an interlaced frame p picture */
  1393. static int vc1_decode_p_mb_intfr(VC1Context *v)
  1394. {
  1395. MpegEncContext *s = &v->s;
  1396. GetBitContext *gb = &s->gb;
  1397. int i;
  1398. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1399. int cbp = 0; /* cbp decoding stuff */
  1400. int mqdiff, mquant; /* MB quantization */
  1401. int ttmb = v->ttfrm; /* MB Transform type */
  1402. int mb_has_coeffs = 1; /* last_flag */
  1403. int dmv_x, dmv_y; /* Differential MV components */
  1404. int val; /* temp value */
  1405. int first_block = 1;
  1406. int dst_idx, off;
  1407. int skipped, fourmv = 0, twomv = 0;
  1408. int block_cbp = 0, pat, block_tt = 0;
  1409. int idx_mbmode = 0, mvbp;
  1410. int stride_y, fieldtx;
  1411. mquant = v->pq; /* Lossy initialization */
  1412. if (v->skip_is_raw)
  1413. skipped = get_bits1(gb);
  1414. else
  1415. skipped = v->s.mbskip_table[mb_pos];
  1416. if (!skipped) {
  1417. if (v->fourmvswitch)
  1418. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
  1419. else
  1420. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
  1421. switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
  1422. /* store the motion vector type in a flag (useful later) */
  1423. case MV_PMODE_INTFR_4MV:
  1424. fourmv = 1;
  1425. v->blk_mv_type[s->block_index[0]] = 0;
  1426. v->blk_mv_type[s->block_index[1]] = 0;
  1427. v->blk_mv_type[s->block_index[2]] = 0;
  1428. v->blk_mv_type[s->block_index[3]] = 0;
  1429. break;
  1430. case MV_PMODE_INTFR_4MV_FIELD:
  1431. fourmv = 1;
  1432. v->blk_mv_type[s->block_index[0]] = 1;
  1433. v->blk_mv_type[s->block_index[1]] = 1;
  1434. v->blk_mv_type[s->block_index[2]] = 1;
  1435. v->blk_mv_type[s->block_index[3]] = 1;
  1436. break;
  1437. case MV_PMODE_INTFR_2MV_FIELD:
  1438. twomv = 1;
  1439. v->blk_mv_type[s->block_index[0]] = 1;
  1440. v->blk_mv_type[s->block_index[1]] = 1;
  1441. v->blk_mv_type[s->block_index[2]] = 1;
  1442. v->blk_mv_type[s->block_index[3]] = 1;
  1443. break;
  1444. case MV_PMODE_INTFR_1MV:
  1445. v->blk_mv_type[s->block_index[0]] = 0;
  1446. v->blk_mv_type[s->block_index[1]] = 0;
  1447. v->blk_mv_type[s->block_index[2]] = 0;
  1448. v->blk_mv_type[s->block_index[3]] = 0;
  1449. break;
  1450. }
  1451. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  1452. for (i = 0; i < 4; i++) {
  1453. s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  1454. s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  1455. }
  1456. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
  1457. s->mb_intra = 1;
  1458. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  1459. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  1460. mb_has_coeffs = get_bits1(gb);
  1461. if (mb_has_coeffs)
  1462. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1463. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1464. GET_MQUANT();
  1465. s->current_picture.qscale_table[mb_pos] = mquant;
  1466. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1467. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1468. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1469. dst_idx = 0;
  1470. for (i = 0; i < 6; i++) {
  1471. v->a_avail = v->c_avail = 0;
  1472. v->mb_type[0][s->block_index[i]] = 1;
  1473. s->dc_val[0][s->block_index[i]] = 0;
  1474. dst_idx += i >> 2;
  1475. val = ((cbp >> (5 - i)) & 1);
  1476. if (i == 2 || i == 3 || !s->first_slice_line)
  1477. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1478. if (i == 1 || i == 3 || s->mb_x)
  1479. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1480. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1481. (i & 4) ? v->codingset2 : v->codingset);
  1482. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1483. continue;
  1484. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1485. if (i < 4) {
  1486. stride_y = s->linesize << fieldtx;
  1487. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  1488. } else {
  1489. stride_y = s->uvlinesize;
  1490. off = 0;
  1491. }
  1492. s->idsp.put_signed_pixels_clamped(s->block[i],
  1493. s->dest[dst_idx] + off,
  1494. stride_y);
  1495. //TODO: loop filter
  1496. }
  1497. } else { // inter MB
  1498. mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
  1499. if (mb_has_coeffs)
  1500. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1501. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  1502. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  1503. } else {
  1504. if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
  1505. || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
  1506. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1507. }
  1508. }
  1509. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1510. for (i = 0; i < 6; i++)
  1511. v->mb_type[0][s->block_index[i]] = 0;
  1512. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
  1513. /* for all motion vector read MVDATA and motion compensate each block */
  1514. dst_idx = 0;
  1515. if (fourmv) {
  1516. mvbp = v->fourmvbp;
  1517. for (i = 0; i < 4; i++) {
  1518. dmv_x = dmv_y = 0;
  1519. if (mvbp & (8 >> i))
  1520. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1521. ff_vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
  1522. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1523. }
  1524. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1525. } else if (twomv) {
  1526. mvbp = v->twomvbp;
  1527. dmv_x = dmv_y = 0;
  1528. if (mvbp & 2) {
  1529. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1530. }
  1531. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1532. ff_vc1_mc_4mv_luma(v, 0, 0, 0);
  1533. ff_vc1_mc_4mv_luma(v, 1, 0, 0);
  1534. dmv_x = dmv_y = 0;
  1535. if (mvbp & 1) {
  1536. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1537. }
  1538. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1539. ff_vc1_mc_4mv_luma(v, 2, 0, 0);
  1540. ff_vc1_mc_4mv_luma(v, 3, 0, 0);
  1541. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1542. } else {
  1543. mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
  1544. dmv_x = dmv_y = 0;
  1545. if (mvbp) {
  1546. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1547. }
  1548. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1549. ff_vc1_mc_1mv(v, 0);
  1550. }
  1551. if (cbp)
  1552. GET_MQUANT(); // p. 227
  1553. s->current_picture.qscale_table[mb_pos] = mquant;
  1554. if (!v->ttmbf && cbp)
  1555. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1556. for (i = 0; i < 6; i++) {
  1557. s->dc_val[0][s->block_index[i]] = 0;
  1558. dst_idx += i >> 2;
  1559. val = ((cbp >> (5 - i)) & 1);
  1560. if (!fieldtx)
  1561. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1562. else
  1563. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  1564. if (val) {
  1565. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1566. first_block, s->dest[dst_idx] + off,
  1567. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  1568. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1569. block_cbp |= pat << (i << 2);
  1570. if (!v->ttmbf && ttmb < 8)
  1571. ttmb = -1;
  1572. first_block = 0;
  1573. }
  1574. }
  1575. }
  1576. } else { // skipped
  1577. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1578. for (i = 0; i < 6; i++) {
  1579. v->mb_type[0][s->block_index[i]] = 0;
  1580. s->dc_val[0][s->block_index[i]] = 0;
  1581. }
  1582. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1583. s->current_picture.qscale_table[mb_pos] = 0;
  1584. v->blk_mv_type[s->block_index[0]] = 0;
  1585. v->blk_mv_type[s->block_index[1]] = 0;
  1586. v->blk_mv_type[s->block_index[2]] = 0;
  1587. v->blk_mv_type[s->block_index[3]] = 0;
  1588. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1589. ff_vc1_mc_1mv(v, 0);
  1590. }
  1591. if (s->mb_x == s->mb_width - 1)
1592. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  1593. return 0;
  1594. }
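/** Decode one P-frame MB (in interlaced field P picture)
 */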
  1595. static int vc1_decode_p_mb_intfi(VC1Context *v)
  1596. {
  1597. MpegEncContext *s = &v->s;
  1598. GetBitContext *gb = &s->gb;
  1599. int i;
  1600. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1601. int cbp = 0; /* cbp decoding stuff */
  1602. int mqdiff, mquant; /* MB quantization */
  1603. int ttmb = v->ttfrm; /* MB Transform type */
  1604. int mb_has_coeffs = 1; /* last_flag */
  1605. int dmv_x, dmv_y; /* Differential MV components */
  1606. int val; /* temp values */
  1607. int first_block = 1;
  1608. int dst_idx, off;
  1609. int pred_flag = 0;
  1610. int block_cbp = 0, pat, block_tt = 0;
  1611. int idx_mbmode = 0;
  1612. mquant = v->pq; /* Lossy initialization */
  1613. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  1614. if (idx_mbmode <= 1) { // intra MB
1615. v->is_intra[s->mb_x] = 0x3f; // all six blocks of this MB are intra
  1616. s->mb_intra = 1;
  1617. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  1618. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  1619. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1620. GET_MQUANT();
  1621. s->current_picture.qscale_table[mb_pos] = mquant;
  1622. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1623. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1624. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1625. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1626. mb_has_coeffs = idx_mbmode & 1;
  1627. if (mb_has_coeffs)
  1628. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1629. dst_idx = 0;
  1630. for (i = 0; i < 6; i++) {
  1631. v->a_avail = v->c_avail = 0;
  1632. v->mb_type[0][s->block_index[i]] = 1;
  1633. s->dc_val[0][s->block_index[i]] = 0;
  1634. dst_idx += i >> 2;
  1635. val = ((cbp >> (5 - i)) & 1);
  1636. if (i == 2 || i == 3 || !s->first_slice_line)
  1637. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1638. if (i == 1 || i == 3 || s->mb_x)
  1639. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1640. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1641. (i & 4) ? v->codingset2 : v->codingset);
  1642. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1643. continue;
  1644. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1645. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1646. s->idsp.put_signed_pixels_clamped(s->block[i],
  1647. s->dest[dst_idx] + off,
  1648. (i & 4) ? s->uvlinesize
  1649. : s->linesize);
  1650. // TODO: loop filter
  1651. }
  1652. } else {
  1653. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1654. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1655. for (i = 0; i < 6; i++)
  1656. v->mb_type[0][s->block_index[i]] = 0;
  1657. if (idx_mbmode <= 5) { // 1-MV
  1658. dmv_x = dmv_y = pred_flag = 0;
  1659. if (idx_mbmode & 1) {
  1660. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1661. }
  1662. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1663. ff_vc1_mc_1mv(v, 0);
  1664. mb_has_coeffs = !(idx_mbmode & 2);
  1665. } else { // 4-MV
  1666. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1667. for (i = 0; i < 4; i++) {
  1668. dmv_x = dmv_y = pred_flag = 0;
  1669. if (v->fourmvbp & (8 >> i))
  1670. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1671. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1672. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1673. }
  1674. ff_vc1_mc_4mv_chroma(v, 0);
  1675. mb_has_coeffs = idx_mbmode & 1;
  1676. }
  1677. if (mb_has_coeffs)
  1678. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1679. if (cbp) {
  1680. GET_MQUANT();
  1681. }
  1682. s->current_picture.qscale_table[mb_pos] = mquant;
  1683. if (!v->ttmbf && cbp) {
  1684. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1685. }
  1686. dst_idx = 0;
  1687. for (i = 0; i < 6; i++) {
  1688. s->dc_val[0][s->block_index[i]] = 0;
  1689. dst_idx += i >> 2;
  1690. val = ((cbp >> (5 - i)) & 1);
  1691. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  1692. if (val) {
  1693. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1694. first_block, s->dest[dst_idx] + off,
  1695. (i & 4) ? s->uvlinesize : s->linesize,
  1696. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1697. &block_tt);
  1698. block_cbp |= pat << (i << 2);
  1699. if (!v->ttmbf && ttmb < 8)
  1700. ttmb = -1;
  1701. first_block = 0;
  1702. }
  1703. }
  1704. }
  1705. if (s->mb_x == s->mb_width - 1)
  1706. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  1707. return 0;
  1708. }
  1709. /** Decode one B-frame MB (in Main profile)
  1710. */
  1711. static void vc1_decode_b_mb(VC1Context *v)
  1712. {
  1713. MpegEncContext *s = &v->s;
  1714. GetBitContext *gb = &s->gb;
  1715. int i, j;
  1716. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1717. int cbp = 0; /* cbp decoding stuff */
  1718. int mqdiff, mquant; /* MB quantization */
  1719. int ttmb = v->ttfrm; /* MB Transform type */
  1720. int mb_has_coeffs = 0; /* last_flag */
  1721. int index, index1; /* LUT indexes */
  1722. int val, sign; /* temp values */
  1723. int first_block = 1;
  1724. int dst_idx, off;
  1725. int skipped, direct;
  1726. int dmv_x[2], dmv_y[2];
  1727. int bmvtype = BMV_TYPE_BACKWARD;
  1728. mquant = v->pq; /* lossy initialization */
  1729. s->mb_intra = 0;
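/* DIRECTMB and SKIPMB flags are either coded raw per MB or taken from the
 * frame-level bitplanes decoded earlier */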
  1730. if (v->dmb_is_raw)
  1731. direct = get_bits1(gb);
  1732. else
  1733. direct = v->direct_mb_plane[mb_pos];
  1734. if (v->skip_is_raw)
  1735. skipped = get_bits1(gb);
  1736. else
  1737. skipped = v->s.mbskip_table[mb_pos];
  1738. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1739. for (i = 0; i < 6; i++) {
  1740. v->mb_type[0][s->block_index[i]] = 0;
  1741. s->dc_val[0][s->block_index[i]] = 0;
  1742. }
  1743. s->current_picture.qscale_table[mb_pos] = 0;
  1744. if (!direct) {
  1745. if (!skipped) {
  1746. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1747. dmv_x[1] = dmv_x[0];
  1748. dmv_y[1] = dmv_y[0];
  1749. }
  1750. if (skipped || !s->mb_intra) {
  1751. bmvtype = decode012(gb);
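/* BMVTYPE: the shortest code selects backward or forward prediction
 * depending on whether BFRACTION is at least 1/2 */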
  1752. switch (bmvtype) {
  1753. case 0:
  1754. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  1755. break;
  1756. case 1:
  1757. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  1758. break;
  1759. case 2:
  1760. bmvtype = BMV_TYPE_INTERPOLATED;
  1761. dmv_x[0] = dmv_y[0] = 0;
  1762. }
  1763. }
  1764. }
  1765. for (i = 0; i < 6; i++)
  1766. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1767. if (skipped) {
  1768. if (direct)
  1769. bmvtype = BMV_TYPE_INTERPOLATED;
  1770. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1771. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1772. return;
  1773. }
  1774. if (direct) {
  1775. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1776. GET_MQUANT();
  1777. s->mb_intra = 0;
  1778. s->current_picture.qscale_table[mb_pos] = mquant;
  1779. if (!v->ttmbf)
  1780. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1781. dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
  1782. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1783. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1784. } else {
  1785. if (!mb_has_coeffs && !s->mb_intra) {
  1786. /* no coded blocks - effectively skipped */
  1787. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1788. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1789. return;
  1790. }
  1791. if (s->mb_intra && !mb_has_coeffs) {
  1792. GET_MQUANT();
  1793. s->current_picture.qscale_table[mb_pos] = mquant;
  1794. s->ac_pred = get_bits1(gb);
  1795. cbp = 0;
  1796. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1797. } else {
  1798. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  1799. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1800. if (!mb_has_coeffs) {
  1801. /* interpolated skipped block */
  1802. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1803. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1804. return;
  1805. }
  1806. }
  1807. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1808. if (!s->mb_intra) {
  1809. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1810. }
  1811. if (s->mb_intra)
  1812. s->ac_pred = get_bits1(gb);
  1813. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1814. GET_MQUANT();
  1815. s->current_picture.qscale_table[mb_pos] = mquant;
  1816. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1817. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1818. }
  1819. }
  1820. dst_idx = 0;
  1821. for (i = 0; i < 6; i++) {
  1822. s->dc_val[0][s->block_index[i]] = 0;
  1823. dst_idx += i >> 2;
  1824. val = ((cbp >> (5 - i)) & 1);
  1825. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1826. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1827. if (s->mb_intra) {
  1828. /* check if prediction blocks A and C are available */
  1829. v->a_avail = v->c_avail = 0;
  1830. if (i == 2 || i == 3 || !s->first_slice_line)
  1831. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1832. if (i == 1 || i == 3 || s->mb_x)
  1833. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1834. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1835. (i & 4) ? v->codingset2 : v->codingset);
  1836. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1837. continue;
  1838. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1839. if (v->rangeredfrm)
  1840. for (j = 0; j < 64; j++)
  1841. s->block[i][j] <<= 1;
  1842. s->idsp.put_signed_pixels_clamped(s->block[i],
  1843. s->dest[dst_idx] + off,
  1844. i & 4 ? s->uvlinesize
  1845. : s->linesize);
  1846. } else if (val) {
  1847. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1848. first_block, s->dest[dst_idx] + off,
  1849. (i & 4) ? s->uvlinesize : s->linesize,
  1850. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
  1851. if (!v->ttmbf && ttmb < 8)
  1852. ttmb = -1;
  1853. first_block = 0;
  1854. }
  1855. }
  1856. }
  1857. /** Decode one B-frame MB (in interlaced field B picture)
  1858. */
  1859. static void vc1_decode_b_mb_intfi(VC1Context *v)
  1860. {
  1861. MpegEncContext *s = &v->s;
  1862. GetBitContext *gb = &s->gb;
  1863. int i, j;
  1864. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1865. int cbp = 0; /* cbp decoding stuff */
  1866. int mqdiff, mquant; /* MB quantization */
  1867. int ttmb = v->ttfrm; /* MB Transform type */
  1868. int mb_has_coeffs = 0; /* last_flag */
  1869. int val; /* temp value */
  1870. int first_block = 1;
  1871. int dst_idx, off;
  1872. int fwd;
  1873. int dmv_x[2], dmv_y[2], pred_flag[2];
  1874. int bmvtype = BMV_TYPE_BACKWARD;
  1875. int idx_mbmode;
  1876. mquant = v->pq; /* Lossy initialization */
  1877. s->mb_intra = 0;
  1878. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  1879. if (idx_mbmode <= 1) { // intra MB
1880. v->is_intra[s->mb_x] = 0x3f; // all six blocks of this MB are intra
  1881. s->mb_intra = 1;
  1882. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1883. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1884. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1885. GET_MQUANT();
  1886. s->current_picture.qscale_table[mb_pos] = mquant;
  1887. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1888. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1889. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1890. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1891. mb_has_coeffs = idx_mbmode & 1;
  1892. if (mb_has_coeffs)
  1893. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1894. dst_idx = 0;
  1895. for (i = 0; i < 6; i++) {
  1896. v->a_avail = v->c_avail = 0;
  1897. v->mb_type[0][s->block_index[i]] = 1;
  1898. s->dc_val[0][s->block_index[i]] = 0;
  1899. dst_idx += i >> 2;
  1900. val = ((cbp >> (5 - i)) & 1);
  1901. if (i == 2 || i == 3 || !s->first_slice_line)
  1902. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1903. if (i == 1 || i == 3 || s->mb_x)
  1904. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1905. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1906. (i & 4) ? v->codingset2 : v->codingset);
  1907. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1908. continue;
  1909. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1910. if (v->rangeredfrm)
  1911. for (j = 0; j < 64; j++)
  1912. s->block[i][j] <<= 1;
  1913. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1914. s->idsp.put_signed_pixels_clamped(s->block[i],
  1915. s->dest[dst_idx] + off,
  1916. (i & 4) ? s->uvlinesize
  1917. : s->linesize);
1918. // TODO: loop filter
  1919. }
  1920. } else {
  1921. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1922. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1923. for (i = 0; i < 6; i++)
  1924. v->mb_type[0][s->block_index[i]] = 0;
  1925. if (v->fmb_is_raw)
  1926. fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
  1927. else
  1928. fwd = v->forward_mb_plane[mb_pos];
  1929. if (idx_mbmode <= 5) { // 1-MV
  1930. int interpmvp = 0;
  1931. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1932. pred_flag[0] = pred_flag[1] = 0;
  1933. if (fwd)
  1934. bmvtype = BMV_TYPE_FORWARD;
  1935. else {
  1936. bmvtype = decode012(gb);
  1937. switch (bmvtype) {
  1938. case 0:
  1939. bmvtype = BMV_TYPE_BACKWARD;
  1940. break;
  1941. case 1:
  1942. bmvtype = BMV_TYPE_DIRECT;
  1943. break;
  1944. case 2:
  1945. bmvtype = BMV_TYPE_INTERPOLATED;
  1946. interpmvp = get_bits1(gb);
  1947. }
  1948. }
  1949. v->bmvtype = bmvtype;
  1950. if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
  1951. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1952. }
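/* for interpolated MBs, INTERPMVP signals an explicit second (backward) MV */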
  1953. if (interpmvp) {
  1954. get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
  1955. }
  1956. if (bmvtype == BMV_TYPE_DIRECT) {
  1957. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
1958. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  1959. if (!s->next_picture_ptr->field_picture) {
  1960. av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
  1961. return;
  1962. }
  1963. }
  1964. ff_vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
  1965. vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
  1966. mb_has_coeffs = !(idx_mbmode & 2);
  1967. } else { // 4-MV
  1968. if (fwd)
  1969. bmvtype = BMV_TYPE_FORWARD;
  1970. v->bmvtype = bmvtype;
  1971. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1972. for (i = 0; i < 4; i++) {
  1973. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  1974. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  1975. if (v->fourmvbp & (8 >> i)) {
  1976. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
  1977. &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
  1978. &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1979. }
  1980. ff_vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
  1981. ff_vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
  1982. }
  1983. ff_vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
  1984. mb_has_coeffs = idx_mbmode & 1;
  1985. }
  1986. if (mb_has_coeffs)
  1987. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1988. if (cbp) {
  1989. GET_MQUANT();
  1990. }
  1991. s->current_picture.qscale_table[mb_pos] = mquant;
  1992. if (!v->ttmbf && cbp) {
  1993. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1994. }
  1995. dst_idx = 0;
  1996. for (i = 0; i < 6; i++) {
  1997. s->dc_val[0][s->block_index[i]] = 0;
  1998. dst_idx += i >> 2;
  1999. val = ((cbp >> (5 - i)) & 1);
  2000. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  2001. if (val) {
  2002. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2003. first_block, s->dest[dst_idx] + off,
  2004. (i & 4) ? s->uvlinesize : s->linesize,
  2005. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
  2006. if (!v->ttmbf && ttmb < 8)
  2007. ttmb = -1;
  2008. first_block = 0;
  2009. }
  2010. }
  2011. }
  2012. }
  2013. /** Decode one B-frame MB (in interlaced frame B picture)
  2014. */
  2015. static int vc1_decode_b_mb_intfr(VC1Context *v)
  2016. {
  2017. MpegEncContext *s = &v->s;
  2018. GetBitContext *gb = &s->gb;
  2019. int i, j;
  2020. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2021. int cbp = 0; /* cbp decoding stuff */
  2022. int mqdiff, mquant; /* MB quantization */
  2023. int ttmb = v->ttfrm; /* MB Transform type */
  2024. int mvsw = 0; /* motion vector switch */
  2025. int mb_has_coeffs = 1; /* last_flag */
  2026. int dmv_x, dmv_y; /* Differential MV components */
  2027. int val; /* temp value */
  2028. int first_block = 1;
  2029. int dst_idx, off;
  2030. int skipped, direct, twomv = 0;
  2031. int block_cbp = 0, pat, block_tt = 0;
  2032. int idx_mbmode = 0, mvbp;
  2033. int stride_y, fieldtx;
  2034. int bmvtype = BMV_TYPE_BACKWARD;
  2035. int dir, dir2;
  2036. mquant = v->pq; /* Lossy initialization */
  2037. s->mb_intra = 0;
  2038. if (v->skip_is_raw)
  2039. skipped = get_bits1(gb);
  2040. else
  2041. skipped = v->s.mbskip_table[mb_pos];
  2042. if (!skipped) {
  2043. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
  2044. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  2045. twomv = 1;
  2046. v->blk_mv_type[s->block_index[0]] = 1;
  2047. v->blk_mv_type[s->block_index[1]] = 1;
  2048. v->blk_mv_type[s->block_index[2]] = 1;
  2049. v->blk_mv_type[s->block_index[3]] = 1;
  2050. } else {
  2051. v->blk_mv_type[s->block_index[0]] = 0;
  2052. v->blk_mv_type[s->block_index[1]] = 0;
  2053. v->blk_mv_type[s->block_index[2]] = 0;
  2054. v->blk_mv_type[s->block_index[3]] = 0;
  2055. }
  2056. }
  2057. if (v->dmb_is_raw)
  2058. direct = get_bits1(gb);
  2059. else
  2060. direct = v->direct_mb_plane[mb_pos];
  2061. if (direct) {
  2062. if (s->next_picture_ptr->field_picture)
  2063. av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
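/* direct mode: both MVs are derived by scaling the co-located MV of the next
 * anchor picture with BFRACTION (s->mv[0] = forward, s->mv[1] = backward) */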
  2064. s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
  2065. s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
  2066. s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
  2067. s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
  2068. if (twomv) {
  2069. s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
  2070. s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
  2071. s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
  2072. s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
  2073. for (i = 1; i < 4; i += 2) {
  2074. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
  2075. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
  2076. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
  2077. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
  2078. }
  2079. } else {
  2080. for (i = 1; i < 4; i++) {
  2081. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
  2082. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
  2083. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
  2084. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
  2085. }
  2086. }
  2087. }
  2088. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  2089. for (i = 0; i < 4; i++) {
  2090. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
  2091. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
  2092. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  2093. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  2094. }
2095. v->is_intra[s->mb_x] = 0x3f; // all six blocks of this MB are intra
  2096. s->mb_intra = 1;
  2097. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2098. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  2099. mb_has_coeffs = get_bits1(gb);
  2100. if (mb_has_coeffs)
  2101. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2102. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  2103. GET_MQUANT();
  2104. s->current_picture.qscale_table[mb_pos] = mquant;
  2105. /* Set DC scale - y and c use the same (not sure if necessary here) */
  2106. s->y_dc_scale = s->y_dc_scale_table[mquant];
  2107. s->c_dc_scale = s->c_dc_scale_table[mquant];
  2108. dst_idx = 0;
  2109. for (i = 0; i < 6; i++) {
  2110. v->a_avail = v->c_avail = 0;
  2111. v->mb_type[0][s->block_index[i]] = 1;
  2112. s->dc_val[0][s->block_index[i]] = 0;
  2113. dst_idx += i >> 2;
  2114. val = ((cbp >> (5 - i)) & 1);
  2115. if (i == 2 || i == 3 || !s->first_slice_line)
  2116. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  2117. if (i == 1 || i == 3 || s->mb_x)
  2118. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  2119. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  2120. (i & 4) ? v->codingset2 : v->codingset);
  2121. if (CONFIG_GRAY && i > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2122. continue;
  2123. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  2124. if (i < 4) {
  2125. stride_y = s->linesize << fieldtx;
  2126. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  2127. } else {
  2128. stride_y = s->uvlinesize;
  2129. off = 0;
  2130. }
  2131. s->idsp.put_signed_pixels_clamped(s->block[i],
  2132. s->dest[dst_idx] + off,
  2133. stride_y);
  2134. }
  2135. } else {
  2136. s->mb_intra = v->is_intra[s->mb_x] = 0;
  2137. if (!direct) {
  2138. if (skipped || !s->mb_intra) {
  2139. bmvtype = decode012(gb);
  2140. switch (bmvtype) {
  2141. case 0:
  2142. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  2143. break;
  2144. case 1:
  2145. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  2146. break;
  2147. case 2:
  2148. bmvtype = BMV_TYPE_INTERPOLATED;
  2149. }
  2150. }
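/* MVSW: when set, the bottom-field MV uses the opposite reference direction
 * to the top-field MV */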
  2151. if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
  2152. mvsw = get_bits1(gb);
  2153. }
  2154. if (!skipped) { // inter MB
  2155. mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
  2156. if (mb_has_coeffs)
  2157. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2158. if (!direct) {
  2159. if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
  2160. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  2161. } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
  2162. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  2163. }
  2164. }
  2165. for (i = 0; i < 6; i++)
  2166. v->mb_type[0][s->block_index[i]] = 0;
  2167. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
2168. /* for all motion vectors, read MVDATA and motion-compensate each block */
  2169. dst_idx = 0;
  2170. if (direct) {
  2171. if (twomv) {
  2172. for (i = 0; i < 4; i++) {
  2173. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  2174. ff_vc1_mc_4mv_luma(v, i, 1, 1);
  2175. }
  2176. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2177. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2178. } else {
  2179. ff_vc1_mc_1mv(v, 0);
  2180. ff_vc1_interp_mc(v);
  2181. }
  2182. } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
  2183. mvbp = v->fourmvbp;
  2184. for (i = 0; i < 4; i++) {
2185. dir = (i == 1 || i == 3);
  2186. dmv_x = dmv_y = 0;
  2187. val = ((mvbp >> (3 - i)) & 1);
  2188. if (val)
  2189. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2190. j = i > 1 ? 2 : 0;
  2191. ff_vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2192. ff_vc1_mc_4mv_luma(v, j, dir, dir);
  2193. ff_vc1_mc_4mv_luma(v, j+1, dir, dir);
  2194. }
  2195. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2196. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2197. } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2198. mvbp = v->twomvbp;
  2199. dmv_x = dmv_y = 0;
  2200. if (mvbp & 2)
  2201. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2202. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2203. ff_vc1_mc_1mv(v, 0);
  2204. dmv_x = dmv_y = 0;
  2205. if (mvbp & 1)
  2206. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2207. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2208. ff_vc1_interp_mc(v);
  2209. } else if (twomv) {
  2210. dir = bmvtype == BMV_TYPE_BACKWARD;
  2211. dir2 = dir;
  2212. if (mvsw)
  2213. dir2 = !dir;
  2214. mvbp = v->twomvbp;
  2215. dmv_x = dmv_y = 0;
  2216. if (mvbp & 2)
  2217. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2218. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2219. dmv_x = dmv_y = 0;
  2220. if (mvbp & 1)
  2221. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2222. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
  2223. if (mvsw) {
  2224. for (i = 0; i < 2; i++) {
  2225. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2226. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2227. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2228. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2229. }
  2230. } else {
  2231. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2232. ff_vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2233. }
  2234. ff_vc1_mc_4mv_luma(v, 0, dir, 0);
  2235. ff_vc1_mc_4mv_luma(v, 1, dir, 0);
  2236. ff_vc1_mc_4mv_luma(v, 2, dir2, 0);
  2237. ff_vc1_mc_4mv_luma(v, 3, dir2, 0);
  2238. ff_vc1_mc_4mv_chroma4(v, dir, dir2, 0);
  2239. } else {
  2240. dir = bmvtype == BMV_TYPE_BACKWARD;
  2241. mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
  2242. dmv_x = dmv_y = 0;
  2243. if (mvbp)
  2244. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2245. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2246. v->blk_mv_type[s->block_index[0]] = 1;
  2247. v->blk_mv_type[s->block_index[1]] = 1;
  2248. v->blk_mv_type[s->block_index[2]] = 1;
  2249. v->blk_mv_type[s->block_index[3]] = 1;
  2250. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2251. for (i = 0; i < 2; i++) {
  2252. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2253. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2254. }
  2255. ff_vc1_mc_1mv(v, dir);
  2256. }
  2257. if (cbp)
  2258. GET_MQUANT(); // p. 227
  2259. s->current_picture.qscale_table[mb_pos] = mquant;
  2260. if (!v->ttmbf && cbp)
  2261. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  2262. for (i = 0; i < 6; i++) {
  2263. s->dc_val[0][s->block_index[i]] = 0;
  2264. dst_idx += i >> 2;
  2265. val = ((cbp >> (5 - i)) & 1);
  2266. if (!fieldtx)
  2267. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  2268. else
  2269. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  2270. if (val) {
  2271. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2272. first_block, s->dest[dst_idx] + off,
  2273. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  2274. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  2275. block_cbp |= pat << (i << 2);
  2276. if (!v->ttmbf && ttmb < 8)
  2277. ttmb = -1;
  2278. first_block = 0;
  2279. }
  2280. }
  2281. } else { // skipped
  2282. dir = 0;
  2283. for (i = 0; i < 6; i++) {
  2284. v->mb_type[0][s->block_index[i]] = 0;
  2285. s->dc_val[0][s->block_index[i]] = 0;
  2286. }
  2287. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  2288. s->current_picture.qscale_table[mb_pos] = 0;
  2289. v->blk_mv_type[s->block_index[0]] = 0;
  2290. v->blk_mv_type[s->block_index[1]] = 0;
  2291. v->blk_mv_type[s->block_index[2]] = 0;
  2292. v->blk_mv_type[s->block_index[3]] = 0;
  2293. if (!direct) {
  2294. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2295. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2296. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2297. } else {
  2298. dir = bmvtype == BMV_TYPE_BACKWARD;
  2299. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2300. if (mvsw) {
2301. int dir2 = !dir;
  2304. for (i = 0; i < 2; i++) {
  2305. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2306. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2307. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2308. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2309. }
  2310. } else {
  2311. v->blk_mv_type[s->block_index[0]] = 1;
  2312. v->blk_mv_type[s->block_index[1]] = 1;
  2313. v->blk_mv_type[s->block_index[2]] = 1;
  2314. v->blk_mv_type[s->block_index[3]] = 1;
  2315. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2316. for (i = 0; i < 2; i++) {
  2317. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2318. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2319. }
  2320. }
  2321. }
  2322. }
  2323. ff_vc1_mc_1mv(v, dir);
  2324. if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
  2325. ff_vc1_interp_mc(v);
  2326. }
  2327. }
  2328. }
  2329. if (s->mb_x == s->mb_width - 1)
  2330. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  2331. v->cbp[s->mb_x] = block_cbp;
  2332. v->ttblk[s->mb_x] = block_tt;
  2333. return 0;
  2334. }
  2335. /** Decode blocks of I-frame
  2336. */
  2337. static void vc1_decode_i_blocks(VC1Context *v)
  2338. {
  2339. int k, j;
  2340. MpegEncContext *s = &v->s;
  2341. int cbp, val;
  2342. uint8_t *coded_val;
  2343. int mb_pos;
2344. /* select the coding set used for AC coefficient VLC tables */
  2345. switch (v->y_ac_table_index) {
  2346. case 0:
  2347. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2348. break;
  2349. case 1:
  2350. v->codingset = CS_HIGH_MOT_INTRA;
  2351. break;
  2352. case 2:
  2353. v->codingset = CS_MID_RATE_INTRA;
  2354. break;
  2355. }
  2356. switch (v->c_ac_table_index) {
  2357. case 0:
  2358. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2359. break;
  2360. case 1:
  2361. v->codingset2 = CS_HIGH_MOT_INTER;
  2362. break;
  2363. case 2:
  2364. v->codingset2 = CS_MID_RATE_INTER;
  2365. break;
  2366. }
  2367. /* Set DC scale - y and c use the same */
  2368. s->y_dc_scale = s->y_dc_scale_table[v->pq];
  2369. s->c_dc_scale = s->c_dc_scale_table[v->pq];
2370. // do frame decode
  2371. s->mb_x = s->mb_y = 0;
  2372. s->mb_intra = 1;
  2373. s->first_slice_line = 1;
  2374. for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
  2375. s->mb_x = 0;
  2376. init_block_index(v);
  2377. for (; s->mb_x < v->end_mb_x; s->mb_x++) {
  2378. uint8_t *dst[6];
  2379. ff_update_block_index(s);
  2380. dst[0] = s->dest[0];
  2381. dst[1] = dst[0] + 8;
  2382. dst[2] = s->dest[0] + s->linesize * 8;
  2383. dst[3] = dst[2] + 8;
  2384. dst[4] = s->dest[1];
  2385. dst[5] = s->dest[2];
  2386. s->bdsp.clear_blocks(s->block[0]);
  2387. mb_pos = s->mb_x + s->mb_y * s->mb_width;
  2388. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2389. s->current_picture.qscale_table[mb_pos] = v->pq;
  2390. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  2391. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  2392. // do actual MB decoding and displaying
  2393. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2394. v->s.ac_pred = get_bits1(&v->s.gb);
  2395. for (k = 0; k < 6; k++) {
  2396. val = ((cbp >> (5 - k)) & 1);
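/* luma coded-block flags are coded as the XOR with a prediction from
 * neighbouring blocks */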
  2397. if (k < 4) {
  2398. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2399. val = val ^ pred;
  2400. *coded_val = val;
  2401. }
  2402. cbp |= val << (5 - k);
  2403. vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
  2404. if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2405. continue;
  2406. v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
  2407. if (v->pq >= 9 && v->overlap) {
  2408. if (v->rangeredfrm)
  2409. for (j = 0; j < 64; j++)
  2410. s->block[k][j] <<= 1;
  2411. s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
  2412. k & 4 ? s->uvlinesize
  2413. : s->linesize);
  2414. } else {
  2415. if (v->rangeredfrm)
  2416. for (j = 0; j < 64; j++)
  2417. s->block[k][j] = (s->block[k][j] - 64) << 1;
  2418. s->idsp.put_pixels_clamped(s->block[k], dst[k],
  2419. k & 4 ? s->uvlinesize
  2420. : s->linesize);
  2421. }
  2422. }
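/* overlap smoothing across block edges, applied when OVERLAP is set and PQUANT >= 9 */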
  2423. if (v->pq >= 9 && v->overlap) {
  2424. if (s->mb_x) {
  2425. v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
  2426. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
  2427. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  2428. v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
  2429. v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
  2430. }
  2431. }
  2432. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
  2433. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
  2434. if (!s->first_slice_line) {
  2435. v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
  2436. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
  2437. if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  2438. v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
  2439. v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
  2440. }
  2441. }
  2442. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
  2443. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
  2444. }
  2445. if (v->s.loop_filter)
  2446. ff_vc1_loop_filter_iblk(v, v->pq);
  2447. if (get_bits_count(&s->gb) > v->bits) {
  2448. ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
  2449. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2450. get_bits_count(&s->gb), v->bits);
  2451. return;
  2452. }
  2453. }
  2454. if (!v->s.loop_filter)
  2455. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2456. else if (s->mb_y)
  2457. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2458. s->first_slice_line = 0;
  2459. }
  2460. if (v->s.loop_filter)
  2461. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
2462. /* This is intentionally mb_height and not end_mb_y - unlike in advanced
2463. * profile, these only differ when decoding MSS2 rectangles. */
  2464. ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
  2465. }
  2466. /** Decode blocks of I-frame for advanced profile
  2467. */
  2468. static void vc1_decode_i_blocks_adv(VC1Context *v)
  2469. {
  2470. int k;
  2471. MpegEncContext *s = &v->s;
  2472. int cbp, val;
  2473. uint8_t *coded_val;
  2474. int mb_pos;
  2475. int mquant = v->pq;
  2476. int mqdiff;
  2477. GetBitContext *gb = &s->gb;
2478. /* select the coding set used for AC coefficient VLC tables */
  2479. switch (v->y_ac_table_index) {
  2480. case 0:
  2481. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2482. break;
  2483. case 1:
  2484. v->codingset = CS_HIGH_MOT_INTRA;
  2485. break;
  2486. case 2:
  2487. v->codingset = CS_MID_RATE_INTRA;
  2488. break;
  2489. }
  2490. switch (v->c_ac_table_index) {
  2491. case 0:
  2492. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2493. break;
  2494. case 1:
  2495. v->codingset2 = CS_HIGH_MOT_INTER;
  2496. break;
  2497. case 2:
  2498. v->codingset2 = CS_MID_RATE_INTER;
  2499. break;
  2500. }
  2501. // do frame decode
  2502. s->mb_x = s->mb_y = 0;
  2503. s->mb_intra = 1;
  2504. s->first_slice_line = 1;
  2505. s->mb_y = s->start_mb_y;
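/* when a slice does not start at the top of the picture, clear the
 * coded-block predictors of the MB row above it */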
  2506. if (s->start_mb_y) {
  2507. s->mb_x = 0;
  2508. init_block_index(v);
  2509. memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
  2510. (1 + s->b8_stride) * sizeof(*s->coded_block));
  2511. }
  2512. for (; s->mb_y < s->end_mb_y; s->mb_y++) {
  2513. s->mb_x = 0;
  2514. init_block_index(v);
  2515. for (;s->mb_x < s->mb_width; s->mb_x++) {
  2516. int16_t (*block)[64] = v->block[v->cur_blk_idx];
  2517. ff_update_block_index(s);
  2518. s->bdsp.clear_blocks(block[0]);
  2519. mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2520. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  2521. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  2522. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  2523. // do actual MB decoding and displaying
  2524. if (v->fieldtx_is_raw)
  2525. v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
  2526. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2527. if (v->acpred_is_raw)
  2528. v->s.ac_pred = get_bits1(&v->s.gb);
  2529. else
  2530. v->s.ac_pred = v->acpred_plane[mb_pos];
  2531. if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
  2532. v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
  2533. GET_MQUANT();
  2534. s->current_picture.qscale_table[mb_pos] = mquant;
  2535. /* Set DC scale - y and c use the same */
  2536. s->y_dc_scale = s->y_dc_scale_table[mquant];
  2537. s->c_dc_scale = s->c_dc_scale_table[mquant];
  2538. for (k = 0; k < 6; k++) {
  2539. val = ((cbp >> (5 - k)) & 1);
  2540. if (k < 4) {
  2541. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2542. val = val ^ pred;
  2543. *coded_val = val;
  2544. }
  2545. cbp |= val << (5 - k);
  2546. v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
  2547. v->c_avail = !!s->mb_x || (k == 1 || k == 3);
  2548. vc1_decode_i_block_adv(v, block[k], k, val,
  2549. (k < 4) ? v->codingset : v->codingset2, mquant);
  2550. if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2551. continue;
  2552. v->vc1dsp.vc1_inv_trans_8x8(block[k]);
  2553. }
  2554. ff_vc1_smooth_overlap_filter_iblk(v);
  2555. vc1_put_signed_blocks_clamped(v);
  2556. if (v->s.loop_filter)
  2557. ff_vc1_loop_filter_iblk_delayed(v, v->pq);
  2558. if (get_bits_count(&s->gb) > v->bits) {
  2559. // TODO: may need modification to handle slice coding
  2560. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2561. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2562. get_bits_count(&s->gb), v->bits);
  2563. return;
  2564. }
  2565. }
  2566. if (!v->s.loop_filter)
  2567. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2568. else if (s->mb_y)
  2569. ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
  2570. s->first_slice_line = 0;
  2571. }
  2572. /* raw bottom MB row */
  2573. s->mb_x = 0;
  2574. init_block_index(v);
  2575. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2576. ff_update_block_index(s);
  2577. vc1_put_signed_blocks_clamped(v);
  2578. if (v->s.loop_filter)
  2579. ff_vc1_loop_filter_iblk_delayed(v, v->pq);
  2580. }
  2581. if (v->s.loop_filter)
  2582. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2583. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2584. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2585. }
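/** Decode blocks of P-frame
 */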
  2586. static void vc1_decode_p_blocks(VC1Context *v)
  2587. {
  2588. MpegEncContext *s = &v->s;
  2589. int apply_loop_filter;
2590. /* select the coding set used for AC coefficient VLC tables */
  2591. switch (v->c_ac_table_index) {
  2592. case 0:
  2593. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2594. break;
  2595. case 1:
  2596. v->codingset = CS_HIGH_MOT_INTRA;
  2597. break;
  2598. case 2:
  2599. v->codingset = CS_MID_RATE_INTRA;
  2600. break;
  2601. }
  2602. switch (v->c_ac_table_index) {
  2603. case 0:
  2604. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2605. break;
  2606. case 1:
  2607. v->codingset2 = CS_HIGH_MOT_INTER;
  2608. break;
  2609. case 2:
  2610. v->codingset2 = CS_MID_RATE_INTER;
  2611. break;
  2612. }
  2613. apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
  2614. v->fcm == PROGRESSIVE;
  2615. s->first_slice_line = 1;
2616. memset(v->cbp_base, 0, sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
  2617. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2618. s->mb_x = 0;
  2619. init_block_index(v);
  2620. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2621. ff_update_block_index(s);
  2622. if (v->fcm == ILACE_FIELD)
  2623. vc1_decode_p_mb_intfi(v);
  2624. else if (v->fcm == ILACE_FRAME)
  2625. vc1_decode_p_mb_intfr(v);
  2626. else vc1_decode_p_mb(v);
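/* the loop filter runs one MB row behind decoding; the last row is filtered
 * after the main loop */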
  2627. if (s->mb_y != s->start_mb_y && apply_loop_filter)
  2628. ff_vc1_apply_p_loop_filter(v);
  2629. if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
  2630. // TODO: may need modification to handle slice coding
  2631. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2632. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2633. get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
  2634. return;
  2635. }
  2636. }
  2637. memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
  2638. memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
  2639. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  2640. memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
  2641. if (s->mb_y != s->start_mb_y)
  2642. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2643. s->first_slice_line = 0;
  2644. }
  2645. if (apply_loop_filter) {
  2646. s->mb_x = 0;
  2647. init_block_index(v);
  2648. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2649. ff_update_block_index(s);
  2650. ff_vc1_apply_p_loop_filter(v);
  2651. }
  2652. }
  2653. if (s->end_mb_y >= s->start_mb_y)
  2654. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2655. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2656. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2657. }
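/** Decode blocks of B-frame
 */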
  2658. static void vc1_decode_b_blocks(VC1Context *v)
  2659. {
  2660. MpegEncContext *s = &v->s;
2661. /* select the coding set used for AC coefficient VLC tables */
  2662. switch (v->c_ac_table_index) {
  2663. case 0:
  2664. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2665. break;
  2666. case 1:
  2667. v->codingset = CS_HIGH_MOT_INTRA;
  2668. break;
  2669. case 2:
  2670. v->codingset = CS_MID_RATE_INTRA;
  2671. break;
  2672. }
  2673. switch (v->c_ac_table_index) {
  2674. case 0:
  2675. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2676. break;
  2677. case 1:
  2678. v->codingset2 = CS_HIGH_MOT_INTER;
  2679. break;
  2680. case 2:
  2681. v->codingset2 = CS_MID_RATE_INTER;
  2682. break;
  2683. }
  2684. s->first_slice_line = 1;
  2685. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2686. s->mb_x = 0;
  2687. init_block_index(v);
  2688. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2689. ff_update_block_index(s);
  2690. if (v->fcm == ILACE_FIELD)
  2691. vc1_decode_b_mb_intfi(v);
  2692. else if (v->fcm == ILACE_FRAME)
  2693. vc1_decode_b_mb_intfr(v);
  2694. else
  2695. vc1_decode_b_mb(v);
  2696. if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
  2697. // TODO: may need modification to handle slice coding
  2698. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2699. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2700. get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
  2701. return;
  2702. }
  2703. if (v->s.loop_filter)
  2704. ff_vc1_loop_filter_iblk(v, v->pq);
  2705. }
  2706. if (!v->s.loop_filter)
  2707. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2708. else if (s->mb_y)
  2709. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2710. s->first_slice_line = 0;
  2711. }
  2712. if (v->s.loop_filter)
  2713. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2714. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2715. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2716. }
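/** Reconstruct a skipped P-frame by copying the previous picture
 */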
  2717. static void vc1_decode_skip_blocks(VC1Context *v)
  2718. {
  2719. MpegEncContext *s = &v->s;
  2720. if (!v->s.last_picture.f->data[0])
  2721. return;
  2722. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
  2723. s->first_slice_line = 1;
  2724. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2725. s->mb_x = 0;
  2726. init_block_index(v);
  2727. ff_update_block_index(s);
  2728. memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
  2729. memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2730. memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2731. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2732. s->first_slice_line = 0;
  2733. }
  2734. s->pict_type = AV_PICTURE_TYPE_P;
  2735. }
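/** Decode all macroblocks of the current picture, dispatching on picture type
 */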
  2736. void ff_vc1_decode_blocks(VC1Context *v)
  2737. {
  2738. v->s.esc3_level_length = 0;
  2739. if (v->x8_type) {
  2740. ff_intrax8_decode_picture(&v->x8, &v->s.current_picture,
  2741. &v->s.gb, &v->s.mb_x, &v->s.mb_y,
  2742. 2 * v->pq + v->halfpq, v->pq * !v->pquantizer,
  2743. v->s.loop_filter, v->s.low_delay);
  2744. ff_er_add_slice(&v->s.er, 0, 0,
  2745. (v->s.mb_x >> 1) - 1, (v->s.mb_y >> 1) - 1,
  2746. ER_MB_END);
  2747. } else {
  2748. v->cur_blk_idx = 0;
  2749. v->left_blk_idx = -1;
  2750. v->topleft_blk_idx = 1;
  2751. v->top_blk_idx = 2;
  2752. switch (v->s.pict_type) {
  2753. case AV_PICTURE_TYPE_I:
  2754. if (v->profile == PROFILE_ADVANCED)
  2755. vc1_decode_i_blocks_adv(v);
  2756. else
  2757. vc1_decode_i_blocks(v);
  2758. break;
  2759. case AV_PICTURE_TYPE_P:
  2760. if (v->p_frame_skipped)
  2761. vc1_decode_skip_blocks(v);
  2762. else
  2763. vc1_decode_p_blocks(v);
  2764. break;
  2765. case AV_PICTURE_TYPE_B:
  2766. if (v->bi_type) {
  2767. if (v->profile == PROFILE_ADVANCED)
  2768. vc1_decode_i_blocks_adv(v);
  2769. else
  2770. vc1_decode_i_blocks(v);
  2771. } else
  2772. vc1_decode_b_blocks(v);
  2773. break;
  2774. }
  2775. }
  2776. }