You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2997 lines
118KB

  1. /*
  2. * VC-1 and WMV3 decoder
  3. * Copyright (c) 2011 Mashiat Sarker Shakkhar
  4. * Copyright (c) 2006-2007 Konstantin Shishkov
  5. * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * VC-1 and WMV3 block decoding routines
  26. */
  27. #include "avcodec.h"
  28. #include "mpegutils.h"
  29. #include "mpegvideo.h"
  30. #include "msmpeg4data.h"
  31. #include "unary.h"
  32. #include "vc1.h"
  33. #include "vc1_pred.h"
  34. #include "vc1acdata.h"
  35. #include "vc1data.h"
  36. #define MB_INTRA_VLC_BITS 9
  37. #define DC_VLC_BITS 9
// offset tables for interlaced picture MVDATA decoding
// row [0]: base offsets; row [1]: offsets when the differential-MV range
// is extended by one bit (see dmvrange handling in get_mvdata_interlaced)
static const uint8_t offset_table[2][9] = {
    { 0, 1, 2, 4,  8, 16, 32,  64, 128 },
    { 0, 1, 3, 7, 15, 31, 63, 127, 255 },
};
  43. /***********************************************************************/
  44. /**
  45. * @name VC-1 Bitplane decoding
  46. * @see 8.7, p56
  47. * @{
  48. */
  49. static inline void init_block_index(VC1Context *v)
  50. {
  51. MpegEncContext *s = &v->s;
  52. ff_init_block_index(s);
  53. if (v->field_mode && !(v->second_field ^ v->tff)) {
  54. s->dest[0] += s->current_picture_ptr->f->linesize[0];
  55. s->dest[1] += s->current_picture_ptr->f->linesize[1];
  56. s->dest[2] += s->current_picture_ptr->f->linesize[2];
  57. }
  58. }
  59. /** @} */ //Bitplane group
/**
 * Write the IDCT output of already-filtered macroblocks to the picture.
 * Runs one MB row and one MB column behind the decoding position so that
 * overlap filtering of shared edges has completed before pixels are output.
 */
static void vc1_put_signed_blocks_clamped(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int topleft_mb_pos, top_mb_pos;
    int stride_y, fieldtx = 0;
    int v_dist;

    /* The put pixels loop is always one MB row behind the decoding loop,
     * because we can only put pixels when overlap filtering is done, and
     * for filtering of the bottom edge of a MB, we need the next MB row
     * present as well.
     * Within the row, the put pixels loop is also one MB col behind the
     * decoding loop. The reason for this is again, because for filtering
     * of the right MB edge, we need the next MB present. */
    if (!s->first_slice_line) {
        if (s->mb_x) {
            /* Flush the MB diagonally above-left of the current position. */
            topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
            if (v->fcm == ILACE_FRAME)
                fieldtx = v->fieldtx_plane[topleft_mb_pos];
            /* Field-transformed MBs store the two fields interleaved, so
             * luma rows are written at doubled stride. */
            stride_y = s->linesize << fieldtx;
            /* Row offset of the lower luma block pair:
             * 8 rows for frame MBs, 15 for fieldtx MBs. */
            v_dist = (16 - fieldtx) >> (fieldtx == 0);
            s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
                                              s->dest[0] - 16 * s->linesize - 16,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
                                              s->dest[0] - 16 * s->linesize - 8,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
                                              s->dest[0] - v_dist * s->linesize - 16,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
                                              s->dest[0] - v_dist * s->linesize - 8,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
                                              s->dest[1] - 8 * s->uvlinesize - 8,
                                              s->uvlinesize);
            s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
                                              s->dest[2] - 8 * s->uvlinesize - 8,
                                              s->uvlinesize);
        }
        if (s->mb_x == s->mb_width - 1) {
            /* End of the row: also flush the MB directly above, which has
             * no right neighbour left to wait for. */
            top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
            if (v->fcm == ILACE_FRAME)
                fieldtx = v->fieldtx_plane[top_mb_pos];
            stride_y = s->linesize << fieldtx;
            v_dist = fieldtx ? 15 : 8;
            s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
                                              s->dest[0] - 16 * s->linesize,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
                                              s->dest[0] - 16 * s->linesize + 8,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
                                              s->dest[0] - v_dist * s->linesize,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
                                              s->dest[0] - v_dist * s->linesize + 8,
                                              stride_y);
            s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
                                              s->dest[1] - 8 * s->uvlinesize,
                                              s->uvlinesize);
            s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
                                              s->dest[2] - 8 * s->uvlinesize,
                                              s->uvlinesize);
        }
    }

/* advance a ring-buffer index over the allocated block buffers */
#define inc_blk_idx(idx) do { \
        idx++; \
        if (idx >= v->n_allocated_blks) \
            idx = 0; \
    } while (0)

    /* Rotate the block-buffer ring by one MB position. */
    inc_blk_idx(v->topleft_blk_idx);
    inc_blk_idx(v->top_blk_idx);
    inc_blk_idx(v->left_blk_idx);
    inc_blk_idx(v->cur_blk_idx);
}
  135. /***********************************************************************/
  136. /**
  137. * @name VC-1 Block-level functions
  138. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  139. * @{
  140. */
  141. /**
  142. * @def GET_MQUANT
  143. * @brief Get macroblock-level quantizer scale
  144. */
/* Expects "mquant" and "mqdiff" ints and "gb"/"s"/"v" in the caller's scope;
 * leaves the MB-level quantizer in "mquant". */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                /* one bit picks pq or altpq */                \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else /* escape: 5-bit absolute quantizer */    \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        /* edges bitmask: 1=left, 2=top, 4=right, 8=bottom */  \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        /* sanitize: valid quantizers are 1..31 */             \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
            mquant = 1;                                        \
        }                                                      \
    }
  179. /**
  180. * @def GET_MVDATA(_dmv_x, _dmv_y)
  181. * @brief Get MV differentials
  182. * @see MVDATA decoding from 8.3.5.2, p(1)20
  183. * @param _dmv_x Horizontal differential for decoded MV
  184. * @param _dmv_y Vertical differential for decoded MV
  185. */
/* Expects "index", "index1", "val", "sign", "mb_has_coeffs" ints and
 * "gb"/"s"/"v" in the caller's scope. */
#define GET_MVDATA(_dmv_x, _dmv_y)                                        \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                        \
    /* indices above 36 additionally signal coded coefficients */         \
    if (index > 36) {                                                     \
        mb_has_coeffs = 1;                                                \
        index -= 37;                                                      \
    } else                                                                \
        mb_has_coeffs = 0;                                                \
    s->mb_intra = 0;                                                      \
    if (!index) {                                                         \
        _dmv_x = _dmv_y = 0;                                              \
    } else if (index == 35) {                                             \
        /* escape: raw fixed-length differentials */                      \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);            \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);            \
    } else if (index == 36) {                                             \
        /* intra MB, no motion vector */                                  \
        _dmv_x = 0;                                                       \
        _dmv_y = 0;                                                       \
        s->mb_intra = 1;                                                  \
    } else {                                                              \
        /* index encodes the (x, y) magnitude classes as a base-6 pair */ \
        index1 = index % 6;                                               \
        _dmv_x = offset_table[1][index1];                                 \
        val = size_table[index1] - (!s->quarter_sample && index1 == 5);   \
        if (val > 0) {                                                    \
            val = get_bits(gb, val);                                      \
            sign = 0 - (val & 1); /* LSB is the sign */                   \
            _dmv_x = (sign ^ ((val >> 1) + _dmv_x)) - sign;               \
        }                                                                 \
                                                                          \
        index1 = index / 6;                                               \
        _dmv_y = offset_table[1][index1];                                 \
        val = size_table[index1] - (!s->quarter_sample && index1 == 5);   \
        if (val > 0) {                                                    \
            val = get_bits(gb, val);                                      \
            sign = 0 - (val & 1);                                         \
            _dmv_y = (sign ^ ((val >> 1) + _dmv_y)) - sign;               \
        }                                                                 \
    }
/**
 * Decode a differential motion vector for interlaced pictures.
 * @param v          VC-1 context
 * @param[out] dmv_x horizontal MV differential
 * @param[out] dmv_y vertical MV differential
 * @param[out] pred_flag reference-field selection flag (may be NULL;
 *                   only written for two-reference pictures)
 */
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
                                                   int *dmv_y, int *pred_flag)
{
    int index, index1;
    int extend_x, extend_y;
    GetBitContext *gb = &v->s.gb;
    int bits, esc;
    int val, sign;

    /* Two-reference field pictures use a different VLC and escape index. */
    if (v->numref) {
        bits = VC1_2REF_MVDATA_VLC_BITS;
        esc  = 125;
    } else {
        bits = VC1_1REF_MVDATA_VLC_BITS;
        esc  = 71;
    }
    /* dmvrange widens the x and/or y differential by one extra bit. */
    extend_x = v->dmvrange & 1;
    extend_y = (v->dmvrange >> 1) & 1;
    index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
    if (index == esc) {
        /* Escape: raw k_x/k_y-bit differentials. */
        *dmv_x = get_bits(gb, v->k_x);
        *dmv_y = get_bits(gb, v->k_y);
        if (v->numref) {
            /* LSB of the raw dmv_y carries the prediction flag. */
            if (pred_flag)
                *pred_flag = *dmv_y & 1;
            *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
        }
    }
    else {
        av_assert0(index < esc);
        /* index+1 encodes the (x, y) magnitude classes as a base-9 pair. */
        index1 = (index + 1) % 9;
        if (index1 != 0) {
            val  = get_bits(gb, index1 + extend_x);
            sign = 0 - (val & 1); /* LSB is the sign */
            *dmv_x = (sign ^ ((val >> 1) + offset_table[extend_x][index1])) - sign;
        } else
            *dmv_x = 0;
        index1 = (index + 1) / 9;
        if (index1 > v->numref) {
            val  = get_bits(gb, (index1 >> v->numref) + extend_y);
            sign = 0 - (val & 1);
            *dmv_y = (sign ^ ((val >> 1) + offset_table[extend_y][index1 >> v->numref])) - sign;
        } else
            *dmv_y = 0;
        /* For two-reference pictures the y class's LSB selects the field. */
        if (v->numref && pred_flag)
            *pred_flag = index1 & 1;
    }
}
  270. /** Reconstruct motion vector for B-frame and do motion compensation
  271. */
  272. static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
  273. int direct, int mode)
  274. {
  275. if (direct) {
  276. ff_vc1_mc_1mv(v, 0);
  277. ff_vc1_interp_mc(v);
  278. return;
  279. }
  280. if (mode == BMV_TYPE_INTERPOLATED) {
  281. ff_vc1_mc_1mv(v, 0);
  282. ff_vc1_interp_mc(v);
  283. return;
  284. }
  285. ff_vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
  286. }
  287. /** Get predicted DC value for I-frames only
  288. * prediction dir: left=0, top=1
  289. * @param s MpegEncContext
  290. * @param overlap flag indicating that overlap filtering is used
  291. * @param pq integer part of picture quantizer
  292. * @param[in] n block index in the current MB
  293. * @param dc_val_ptr Pointer to DC predictor
  294. * @param dir_ptr Prediction direction for use in AC prediction
  295. */
static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                                int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred, scale;
    int16_t *dc_val;
    /* dcpred[i] == round(1024 / i): default predictor for missing
     * neighbours.  NOTE(review): entry 0 is -1 stored in a uint16_t;
     * presumably scale is never 0 here -- confirm against the DC scale
     * tables. */
    static const uint16_t dcpred[32] = {
        -1, 1024,  512,  341,  256,  205,  171,  146,  128,
       114,  102,   93,   85,   79,   73,   68,   64,
        60,   57,   54,   51,   49,   47,   45,   43,
        41,   39,   38,   37,   35,   34,   33
    };

    /* find prediction - wmv3_dc_scale always used here in fact */
    if (n < 4) scale = s->y_dc_scale;
    else       scale = s->c_dc_scale;

    wrap   = s->block_wrap[n];
    dc_val = s->dc_val[0] + s->block_index[n];

    /* Neighbour layout:
     * B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];

    if (pq < 9 || !overlap) {
        /* Set outer values: substitute the default predictor for
         * neighbours outside the picture/slice. */
        if (s->first_slice_line && (n != 2 && n != 3))
            b = a = dcpred[scale];
        if (s->mb_x == 0 && (n != 1 && n != 3))
            b = c = dcpred[scale];
    } else {
        /* Set outer values: with overlap filtering at high pq, missing
         * neighbours count as zero instead. */
        if (s->first_slice_line && (n != 2 && n != 3))
            b = a = 0;
        if (s->mb_x == 0 && (n != 1 && n != 3))
            b = c = 0;
    }

    /* Predict from the direction with the smaller gradient. */
    if (abs(a - b) <= abs(b - c)) {
        pred     = c;
        *dir_ptr = 1; // left
    } else {
        pred     = a;
        *dir_ptr = 0; // top
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}
  342. /** Get predicted DC value
  343. * prediction dir: left=0, top=1
  344. * @param s MpegEncContext
  345. * @param overlap flag indicating that overlap filtering is used
  346. * @param pq integer part of picture quantizer
  347. * @param[in] n block index in the current MB
  348. * @param a_avail flag indicating top block availability
  349. * @param c_avail flag indicating left block availability
  350. * @param dc_val_ptr Pointer to DC predictor
  351. * @param dir_ptr Prediction direction for use in AC prediction
  352. */
static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                                 int a_avail, int c_avail,
                                 int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred;
    int16_t *dc_val;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int q1, q2 = 0;
    int dqscale_index;

    /* scale predictors if needed */
    q1 = s->current_picture.qscale_table[mb_pos];
    dqscale_index = s->y_dc_scale_table[q1] - 1;
    if (dqscale_index < 0)
        return 0;

    wrap   = s->block_wrap[n];
    dc_val = s->dc_val[0] + s->block_index[n];

    /* Neighbour layout:
     * B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];

    /* Each neighbour's stored DC was quantized with its own MB quantizer;
     * rescale it to the current one.  NOTE(review): the fixed-point form
     * (x * scale[q2] * dqscale[scale[q1]-1] + 0x20000) >> 18 presumably
     * approximates x * scale[q2] / scale[q1] with rounding -- confirm
     * against ff_vc1_dqscale in vc1data.c. */
    if (c_avail && (n != 1 && n != 3)) {
        q2 = s->current_picture.qscale_table[mb_pos - 1];
        if (q2 && q2 != q1)
            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && (n != 2 && n != 3)) {
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
        if (q2 && q2 != q1)
            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && c_avail && (n != 3)) {
        /* Locate the MB holding the top-left neighbour block B. */
        int off = mb_pos;
        if (n != 1)
            off--;
        if (n != 2)
            off -= s->mb_stride;
        q2 = s->current_picture.qscale_table[off];
        if (q2 && q2 != q1)
            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }

    /* Predict from left when its gradient is smaller (or top is missing),
     * from top otherwise; fall back to 0 with no neighbours at all. */
    if (c_avail && (!a_avail || abs(a - b) <= abs(b - c))) {
        pred     = c;
        *dir_ptr = 1; // left
    } else if (a_avail) {
        pred     = a;
        *dir_ptr = 0; // top
    } else {
        pred     = 0;
        *dir_ptr = 1; // left
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}
  409. /** @} */ // Block group
  410. /**
  411. * @name VC1 Macroblock-level functions in Simple/Main Profiles
  412. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  413. * @{
  414. */
  415. static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
  416. uint8_t **coded_block_ptr)
  417. {
  418. int xy, wrap, pred, a, b, c;
  419. xy = s->block_index[n];
  420. wrap = s->b8_stride;
  421. /* B C
  422. * A X
  423. */
  424. a = s->coded_block[xy - 1 ];
  425. b = s->coded_block[xy - 1 - wrap];
  426. c = s->coded_block[xy - wrap];
  427. if (b == c) {
  428. pred = a;
  429. } else {
  430. pred = c;
  431. }
  432. /* store value */
  433. *coded_block_ptr = &s->coded_block[xy];
  434. return pred;
  435. }
  436. /**
  437. * Decode one AC coefficient
  438. * @param v The VC1 context
  439. * @param last Last coefficient
  440. * @param skip How much zero coefficients to skip
  441. * @param value Decoded AC coefficient value
  442. * @param codingset set of VLC to decode data
  443. * @see 8.1.3.4
  444. */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, run, level, lst, sign;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        /* Regular symbol: (run, level) straight from the tables. */
        run   = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        /* Force 'last' when the bitstream is exhausted so callers stop. */
        lst   = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        sign  = get_bits1(gb);
    } else {
        int escape = decode210(gb);
        if (escape != 2) {
            /* Escape modes 1/2: table symbol plus a delta correction to
             * the level (escape 0) or the run (escape 1). */
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run   = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst   = index >= vc1_last_decode_table[codingset];
            if (escape == 0) {
                if (lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if (lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            sign = get_bits1(gb);
        } else {
            /* Escape mode 3: fixed-length run/level; the field widths are
             * parsed once and then reused (tables 59/60 of the spec). */
            lst = get_bits1(gb);
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run   = get_bits(gb, v->s.esc3_run_length);
            sign  = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
        }
    }

    *last  = lst;
    *skip  = run;
    *value = (level ^ -sign) + sign; /* branchless sign application */
}
  496. /** Decode intra block in intra frames - should be faster than decode_intra_block
  497. * @param v VC1Context
  498. * @param block block to decode
  499. * @param[in] n subblock index
  500. * @param coded are AC coeffs present or not
  501. * @param codingset set of VLC to decode data
  502. */
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
                              int coded, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        /* At pq 1/2 the DC differential carries m extra precision bits. */
        const int m = (v->pq == 1 || v->pq == 2) ? 3 - v->pq : 0;
        if (dcdiff == 119 /* ESC index value */) {
            dcdiff = get_bits(gb, 8 + m);
        } else {
            if (m)
                dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }
    /* Skip ? */
    if (!coded) {
        goto not_coded;
    }

    // AC Decoding
    i = 1;

    {
        int last = 0, skip, value;
        const uint8_t *zz_table;
        int scale;
        int k;

        scale = v->pq * 2 + v->halfpq;

        /* Pick the scan order matching the AC prediction direction. */
        if (v->s.ac_pred) {
            if (!dc_pred_dir)
                zz_table = v->zz_8x8[2]; // top
            else
                zz_table = v->zz_8x8[3]; // left
        } else
            zz_table = v->zz_8x8[1];

        ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;
        /* ac_val points at the predicting neighbour's saved coefficients;
         * ac_val2 keeps pointing at this block's slot. */
        if (dc_pred_dir) // left
            ac_val -= 16;
        else // top
            ac_val -= 16 * s->block_wrap[n];

        /* Run-level decode until the 'last' coefficient. */
        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if (s->ac_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++)
                    block[k << v->left_blk_sh] += ac_val[k];
            } else { // top
                for (k = 1; k < 8; k++)
                    block[k << v->top_blk_sh] += ac_val[k + 8];
            }
        }
        /* save AC coeffs for further prediction: first column then first row */
        for (k = 1; k < 8; k++) {
            ac_val2[k]     = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs; with a non-uniform quantizer add the
         * sign-matched pq offset */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -v->pq : v->pq;
            }

        if (s->ac_pred) i = 63;
    }

not_coded:
    if (!coded) {
        int k, scale;
        ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;

        i = 0;
        scale = v->pq * 2 + v->halfpq;
        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            ac_val -= 16;
            if (s->ac_pred)
                memcpy(ac_val2, ac_val, 8 * 2);
        } else { // top
            ac_val -= 16 * s->block_wrap[n];
            if (s->ac_pred)
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
        }

        /* apply AC prediction if needed: the block's only AC content is
         * the predicted column/row */
        if (s->ac_pred) {
            if (dc_pred_dir) { //left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
  637. /** Decode intra block in intra frames - should be faster than decode_intra_block
  638. * @param v VC1Context
  639. * @param block block to decode
  640. * @param[in] n subblock number
  641. * @param coded are AC coeffs present or not
  642. * @param codingset set of VLC to decode data
  643. * @param mquant quantizer value for this macroblock
  644. */
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
                                  int coded, int codingset, int mquant)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val = NULL;
    int16_t *ac_val, *ac_val2;
    int dcdiff;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int scale;
    int q1, q2 = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        /* At mquant 1/2 the DC differential carries m extra precision bits. */
        const int m = (mquant == 1 || mquant == 2) ? 3 - mquant : 0;
        if (dcdiff == 119 /* ESC index value */) {
            dcdiff = get_bits(gb, 8 + m);
        } else {
            if (m)
                dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }

    //AC Decoding
    i = 1;

    /* check if AC is needed at all */
    if (!a_avail && !c_avail)
        use_pred = 0;

    ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
    ac_val2 = ac_val;

    scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);

    /* ac_val -> predicting neighbour's saved coefficients;
     * ac_val2 stays on this block's own slot. */
    if (dc_pred_dir) // left
        ac_val -= 16;
    else // top
        ac_val -= 16 * s->block_wrap[n];

    /* Find the quantizer of the MB the predictor came from: left MB,
     * top MB, or (for blocks 1/2/3) inside the current MB itself. */
    q1 = s->current_picture.qscale_table[mb_pos];
    if ( dc_pred_dir && c_avail && mb_pos)
        q2 = s->current_picture.qscale_table[mb_pos - 1];
    if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
    if ( dc_pred_dir && n == 1)
        q2 = q1;
    if (!dc_pred_dir && n == 2)
        q2 = q1;
    if (n == 3)
        q2 = q1;

    if (coded) {
        int last = 0, skip, value;
        const uint8_t *zz_table;
        int k;

        /* Pick the scan order: prediction-direction-adapted scans when AC
         * prediction is on, interlaced or normal zigzag otherwise. */
        if (v->s.ac_pred) {
            if (!use_pred && v->fcm == ILACE_FRAME) {
                zz_table = v->zzi_8x8;
            } else {
                if (!dc_pred_dir) // top
                    zz_table = v->zz_8x8[2];
                else // left
                    zz_table = v->zz_8x8[3];
            }
        } else {
            if (v->fcm != ILACE_FRAME)
                zz_table = v->zz_8x8[1];
            else
                zz_table = v->zzi_8x8;
        }

        /* Run-level decode until the 'last' coefficient. */
        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            /* scale predictors if the neighbour used another quantizer */
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                if (q1 < 1)
                    return AVERROR_INVALIDDATA;
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                } else { // top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            } else {
                if (dc_pred_dir) { //left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += ac_val[k];
                } else { //top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += ac_val[k + 8];
                }
            }
        }
        /* save AC coeffs for further prediction: first column then first row */
        for (k = 1; k < 8; k++) {
            ac_val2[k    ] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs; non-uniform quantizer adds a sign-matched offset */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -mquant : mquant;
            }

        if (use_pred) i = 63;
    } else { // no AC coeffs
        int k;

        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            if (use_pred) {
                memcpy(ac_val2, ac_val, 8 * 2);
                /* rescale the copied predictors across quantizer changes */
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        } else { // top
            if (use_pred) {
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        }

        /* apply AC prediction if needed: the block's only AC content is
         * the predicted column/row */
        if (use_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val2[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
  826. /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
  827. * @param v VC1Context
  828. * @param block block to decode
  829. * @param[in] n subblock index
  830. * @param coded are AC coeffs present or not
  831. * @param mquant block quantizer
  832. * @param codingset set of VLC to decode data
  833. */
  834. static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
  835. int coded, int mquant, int codingset)
  836. {
  837. GetBitContext *gb = &v->s.gb;
  838. MpegEncContext *s = &v->s;
  839. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  840. int i;
  841. int16_t *dc_val = NULL;
  842. int16_t *ac_val, *ac_val2;
  843. int dcdiff;
  844. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  845. int a_avail = v->a_avail, c_avail = v->c_avail;
  846. int use_pred = s->ac_pred;
  847. int scale;
  848. int q1, q2 = 0;
  849. s->bdsp.clear_block(block);
  850. /* XXX: Guard against dumb values of mquant */
  851. mquant = av_clip_uintp2(mquant, 5);
  852. /* Set DC scale - y and c use the same */
  853. s->y_dc_scale = s->y_dc_scale_table[mquant];
  854. s->c_dc_scale = s->c_dc_scale_table[mquant];
  855. /* Get DC differential */
  856. if (n < 4) {
  857. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  858. } else {
  859. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  860. }
  861. if (dcdiff < 0) {
  862. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  863. return -1;
  864. }
  865. if (dcdiff) {
  866. const int m = (mquant == 1 || mquant == 2) ? 3 - mquant : 0;
  867. if (dcdiff == 119 /* ESC index value */) {
  868. dcdiff = get_bits(gb, 8 + m);
  869. } else {
  870. if (m)
  871. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  872. }
  873. if (get_bits1(gb))
  874. dcdiff = -dcdiff;
  875. }
  876. /* Prediction */
  877. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
  878. *dc_val = dcdiff;
  879. /* Store the quantized DC coeff, used for prediction */
  880. if (n < 4) {
  881. block[0] = dcdiff * s->y_dc_scale;
  882. } else {
  883. block[0] = dcdiff * s->c_dc_scale;
  884. }
  885. //AC Decoding
  886. i = 1;
  887. /* check if AC is needed at all and adjust direction if needed */
  888. if (!a_avail) dc_pred_dir = 1;
  889. if (!c_avail) dc_pred_dir = 0;
  890. if (!a_avail && !c_avail) use_pred = 0;
  891. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  892. ac_val2 = ac_val;
  893. scale = mquant * 2 + v->halfpq;
  894. if (dc_pred_dir) //left
  895. ac_val -= 16;
  896. else //top
  897. ac_val -= 16 * s->block_wrap[n];
  898. q1 = s->current_picture.qscale_table[mb_pos];
  899. if (dc_pred_dir && c_avail && mb_pos)
  900. q2 = s->current_picture.qscale_table[mb_pos - 1];
  901. if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
  902. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  903. if ( dc_pred_dir && n == 1)
  904. q2 = q1;
  905. if (!dc_pred_dir && n == 2)
  906. q2 = q1;
  907. if (n == 3) q2 = q1;
  908. if (coded) {
  909. int last = 0, skip, value;
  910. int k;
  911. while (!last) {
  912. vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  913. i += skip;
  914. if (i > 63)
  915. break;
  916. if (v->fcm == PROGRESSIVE)
  917. block[v->zz_8x8[0][i++]] = value;
  918. else {
  919. if (use_pred && (v->fcm == ILACE_FRAME)) {
  920. if (!dc_pred_dir) // top
  921. block[v->zz_8x8[2][i++]] = value;
  922. else // left
  923. block[v->zz_8x8[3][i++]] = value;
  924. } else {
  925. block[v->zzi_8x8[i++]] = value;
  926. }
  927. }
  928. }
  929. /* apply AC prediction if needed */
  930. if (use_pred) {
  931. /* scale predictors if needed*/
  932. if (q2 && q1 != q2) {
  933. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  934. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  935. if (q1 < 1)
  936. return AVERROR_INVALIDDATA;
  937. if (dc_pred_dir) { // left
  938. for (k = 1; k < 8; k++)
  939. block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  940. } else { //top
  941. for (k = 1; k < 8; k++)
  942. block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  943. }
  944. } else {
  945. if (dc_pred_dir) { // left
  946. for (k = 1; k < 8; k++)
  947. block[k << v->left_blk_sh] += ac_val[k];
  948. } else { // top
  949. for (k = 1; k < 8; k++)
  950. block[k << v->top_blk_sh] += ac_val[k + 8];
  951. }
  952. }
  953. }
  954. /* save AC coeffs for further prediction */
  955. for (k = 1; k < 8; k++) {
  956. ac_val2[k ] = block[k << v->left_blk_sh];
  957. ac_val2[k + 8] = block[k << v->top_blk_sh];
  958. }
  959. /* scale AC coeffs */
  960. for (k = 1; k < 64; k++)
  961. if (block[k]) {
  962. block[k] *= scale;
  963. if (!v->pquantizer)
  964. block[k] += (block[k] < 0) ? -mquant : mquant;
  965. }
  966. if (use_pred) i = 63;
  967. } else { // no AC coeffs
  968. int k;
  969. memset(ac_val2, 0, 16 * 2);
  970. if (dc_pred_dir) { // left
  971. if (use_pred) {
  972. memcpy(ac_val2, ac_val, 8 * 2);
  973. if (q2 && q1 != q2) {
  974. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  975. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  976. if (q1 < 1)
  977. return AVERROR_INVALIDDATA;
  978. for (k = 1; k < 8; k++)
  979. ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  980. }
  981. }
  982. } else { // top
  983. if (use_pred) {
  984. memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
  985. if (q2 && q1 != q2) {
  986. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  987. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  988. if (q1 < 1)
  989. return AVERROR_INVALIDDATA;
  990. for (k = 1; k < 8; k++)
  991. ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  992. }
  993. }
  994. }
  995. /* apply AC prediction if needed */
  996. if (use_pred) {
  997. if (dc_pred_dir) { // left
  998. for (k = 1; k < 8; k++) {
  999. block[k << v->left_blk_sh] = ac_val2[k] * scale;
  1000. if (!v->pquantizer && block[k << v->left_blk_sh])
  1001. block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
  1002. }
  1003. } else { // top
  1004. for (k = 1; k < 8; k++) {
  1005. block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
  1006. if (!v->pquantizer && block[k << v->top_blk_sh])
  1007. block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
  1008. }
  1009. }
  1010. i = 63;
  1011. }
  1012. }
  1013. s->block_last_index[n] = i;
  1014. return 0;
  1015. }
/** Decode one inter-coded block of a P frame
 *
 * Reads the block-level transform type when not fixed at MB level
 * (ttmb == -1), plus the sub-block coded pattern where applicable, then
 * decodes run/level AC coefficients for each coded sub-block, dequantizes
 * them and applies the matching inverse transform onto dst. When
 * skip_block is set the bitstream is still parsed (to stay in sync) but
 * no reconstruction is written.
 *
 * @param v           VC-1 decoder context
 * @param block       64-coefficient scratch buffer, cleared on entry
 * @param n           block index within the MB (0-3 luma, 4-5 chroma)
 * @param mquant      MB quantizer scale
 * @param ttmb        MB-level transform type, or -1 to read it per block
 * @param first_block nonzero for the first coded block of the MB
 * @param dst         destination pixels
 * @param linesize    destination stride
 * @param skip_block  parse-only mode (e.g. chroma with CODEC_FLAG_GRAY)
 * @param ttmb_out    if non-NULL, ORed with the transform type shifted to
 *                    position n * 4
 * @return 4-bit coded sub-block pattern for this block
 */
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
                              int mquant, int ttmb, int first_block,
                              uint8_t *dst, int linesize, int skip_block,
                              int *ttmb_out)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int subblkpat = 0;
    int scale, off, idx, last, skip, value;
    int ttblk = ttmb & 7;
    int pat = 0;

    s->bdsp.clear_block(block);

    /* transform type signalled at block level */
    if (ttmb == -1) {
        ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
    }
    if (ttblk == TT_4X4) {
        subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
    }
    if ((ttblk != TT_8X8 && ttblk != TT_4X4)
        && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
            || (!v->res_rtm_flag && !first_block))) {
        subblkpat = decode012(gb);
        if (subblkpat)
            subblkpat ^= 3; // swap decoded pattern bits
        if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
            ttblk = TT_8X4;
        if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
            ttblk = TT_4X8;
    }
    scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);

    // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
    if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
        subblkpat = 2 - (ttblk == TT_8X4_TOP);
        ttblk     = TT_8X4;
    }
    if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
        subblkpat = 2 - (ttblk == TT_4X8_LEFT);
        ttblk     = TT_4X8;
    }

    switch (ttblk) {
    case TT_8X8:
        pat  = 0xF; // whole block counts as coded
        i    = 0;
        last = 0;
        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
            i += skip;
            if (i > 63)
                break;
            if (!v->fcm)
                idx = v->zz_8x8[0][i++];
            else
                idx = v->zzi_8x8[i++];
            block[idx] = value * scale;
            if (!v->pquantizer)
                block[idx] += (block[idx] < 0) ? -mquant : mquant;
        }
        if (!skip_block) {
            if (i == 1) // only one coefficient decoded: DC-only fast path
                v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
            else {
                v->vc1dsp.vc1_inv_trans_8x8(block);
                s->idsp.add_pixels_clamped(block, dst, linesize);
            }
        }
        break;
    case TT_4X4:
        pat = ~subblkpat & 0xF;
        for (j = 0; j < 4; j++) {
            last = subblkpat & (1 << (3 - j)); // set bit = sub-block not coded
            i    = 0;
            off  = (j & 1) * 4 + (j & 2) * 16; // 4x4 sub-block origin in the 8x8 buffer
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 15)
                    break;
                if (!v->fcm)
                    idx = ff_vc1_simple_progressive_4x4_zz[i++];
                else
                    idx = ff_vc1_adv_interlaced_4x4_zz[i++];
                block[idx + off] = value * scale;
                if (!v->pquantizer)
                    block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
            }
        }
        break;
    case TT_8X4:
        pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i    = 0;
            off  = j * 32; // bottom half starts at row 4 of the 8x8 buffer
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 31)
                    break;
                if (!v->fcm)
                    idx = v->zz_8x4[i++] + off;
                else
                    idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
                block[idx] = value * scale;
                if (!v->pquantizer)
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
            }
        }
        break;
    case TT_4X8:
        pat = ~(subblkpat * 5) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i    = 0;
            off  = j * 4; // right half starts at column 4
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 31)
                    break;
                if (!v->fcm)
                    idx = v->zz_4x8[i++] + off;
                else
                    idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
                block[idx] = value * scale;
                if (!v->pquantizer)
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
            }
        }
        break;
    }
    if (ttmb_out)
        *ttmb_out |= ttblk << (n * 4);
    return pat;
}
  1171. /** @} */ // Macroblock group
/* NOTE(review): used by the GET_MVDATA() macro (defined elsewhere) together
 * with offset_table above; presumably the number of extra bits to read per
 * MV-differential size index — confirm against the macro definition. */
static const uint8_t size_table[6] = { 0, 2, 3, 4, 5, 8 };
/** Decode one progressive P-frame macroblock
 *
 * Handles both 1MV and 4MV macroblocks, coded or skipped, including the
 * intra blocks that may occur inside a P MB. On exit the per-MB coded
 * block pattern, transform types and intra bitfield are stored into
 * v->cbp / v->ttblk / v->is_intra for the current mb_x column.
 *
 * @return always 0
 */
static int vc1_decode_p_mb(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp;                       /* cbp decoding stuff */
    int mqdiff, mquant;            /* MB quantization */
    int ttmb = v->ttfrm;           /* MB Transform type */
    int mb_has_coeffs = 1;         /* last_flag */
    int dmv_x, dmv_y;              /* Differential MV components */
    int index, index1;             /* LUT indexes */
    int val, sign;                 /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int skipped, fourmv;
    int block_cbp = 0, pat, block_tt = 0, block_intra = 0;

    mquant = v->pq; /* lossy initialization */

    /* MV-type and skip flags: either raw in the bitstream or from the
     * bitplane decoded with the picture header */
    if (v->mv_type_is_raw)
        fourmv = get_bits1(gb);
    else
        fourmv = v->mv_type_mb_plane[mb_pos];
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    if (!fourmv) { /* 1MV mode */
        if (!skipped) {
            GET_MVDATA(dmv_x, dmv_y); // may set s->mb_intra

            if (s->mb_intra) {
                s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
                s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
            }
            s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
            ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);

            /* FIXME Set DC val for inter block ? */
            if (s->mb_intra && !mb_has_coeffs) {
                GET_MQUANT();
                s->ac_pred = get_bits1(gb);
                cbp        = 0;
            } else if (mb_has_coeffs) {
                if (s->mb_intra)
                    s->ac_pred = get_bits1(gb);
                cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
                GET_MQUANT();
            } else {
                mquant = v->pq;
                cbp    = 0;
            }
            s->current_picture.qscale_table[mb_pos] = mquant;

            if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
                                VC1_TTMB_VLC_BITS, 2);
            if (!s->mb_intra) ff_vc1_mc_1mv(v, 0);
            dst_idx = 0;
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2; // 0 for luma blocks, then 1 (U), 2 (V)
                val = ((cbp >> (5 - i)) & 1);
                off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                v->mb_type[0][s->block_index[i]] = s->mb_intra;
                if (s->mb_intra) {
                    /* check if prediction blocks A and C are available */
                    v->a_avail = v->c_avail = 0;
                    if (i == 2 || i == 3 || !s->first_slice_line)
                        v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                    if (i == 1 || i == 3 || s->mb_x)
                        v->c_avail = v->mb_type[0][s->block_index[i] - 1];

                    vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                           (i & 4) ? v->codingset2 : v->codingset);
                    if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
                        continue; // gray-only decode: skip chroma reconstruction
                    v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                    if (v->rangeredfrm) // range reduction: double the samples
                        for (j = 0; j < 64; j++)
                            s->block[i][j] <<= 1;
                    s->idsp.put_signed_pixels_clamped(s->block[i],
                                                      s->dest[dst_idx] + off,
                                                      i & 4 ? s->uvlinesize
                                                            : s->linesize);
                    if (v->pq >= 9 && v->overlap) {
                        if (v->c_avail)
                            v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                        if (v->a_avail)
                            v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                    }
                    block_cbp   |= 0xF << (i << 2);
                    block_intra |= 1 << i;
                } else if (val) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
                                             s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
                        ttmb = -1; // re-read per-block transform type next time
                    first_block = 0;
                }
            }
        } else { // skipped
            s->mb_intra = 0;
            for (i = 0; i < 6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]]  = 0;
            }
            s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
            s->current_picture.qscale_table[mb_pos] = 0;
            ff_vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
            ff_vc1_mc_1mv(v, 0);
        }
    } else { // 4MV mode
        if (!skipped /* unskipped MB */) {
            int intra_count = 0, coded_inter = 0;
            int is_intra[6], is_coded[6];
            /* Get CBPCY */
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            for (i = 0; i < 6; i++) {
                val = ((cbp >> (5 - i)) & 1);
                s->dc_val[0][s->block_index[i]] = 0;
                s->mb_intra = 0;
                if (i < 4) { // luma: per-block MV
                    dmv_x = dmv_y = 0;
                    s->mb_intra   = 0;
                    mb_has_coeffs = 0;
                    if (val) {
                        GET_MVDATA(dmv_x, dmv_y);
                    }
                    ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
                    if (!s->mb_intra)
                        ff_vc1_mc_4mv_luma(v, i, 0, 0);
                    intra_count += s->mb_intra;
                    is_intra[i]  = s->mb_intra;
                    is_coded[i]  = mb_has_coeffs;
                }
                if (i & 4) { // chroma: intra iff 3+ luma blocks are intra
                    is_intra[i] = (intra_count >= 3);
                    is_coded[i] = val;
                }
                if (i == 4)
                    ff_vc1_mc_4mv_chroma(v, 0);
                v->mb_type[0][s->block_index[i]] = is_intra[i];
                if (!coded_inter)
                    /* bitwise '&' acts as logical AND here: both operands are 0/1 */
                    coded_inter = !is_intra[i] & is_coded[i];
            }

            // if there are no coded blocks then don't do anything more
            dst_idx = 0;
            if (!intra_count && !coded_inter)
                goto end;
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            /* test if block is intra and has pred */
            {
                int intrapred = 0;
                for (i = 0; i < 6; i++)
                    if (is_intra[i]) {
                        if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
                            || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
                            intrapred = 1;
                            break;
                        }
                    }
                if (intrapred)
                    s->ac_pred = get_bits1(gb);
                else
                    s->ac_pred = 0;
            }
            if (!v->ttmbf && coded_inter)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            for (i = 0; i < 6; i++) {
                dst_idx    += i >> 2;
                off         = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                s->mb_intra = is_intra[i];
                if (is_intra[i]) {
                    /* check if prediction blocks A and C are available */
                    v->a_avail = v->c_avail = 0;
                    if (i == 2 || i == 3 || !s->first_slice_line)
                        v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                    if (i == 1 || i == 3 || s->mb_x)
                        v->c_avail = v->mb_type[0][s->block_index[i] - 1];

                    vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
                                           (i & 4) ? v->codingset2 : v->codingset);
                    if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
                        continue;
                    v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[i][j] <<= 1;
                    s->idsp.put_signed_pixels_clamped(s->block[i],
                                                      s->dest[dst_idx] + off,
                                                      (i & 4) ? s->uvlinesize
                                                              : s->linesize);
                    if (v->pq >= 9 && v->overlap) {
                        if (v->c_avail)
                            v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                        if (v->a_avail)
                            v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
                    }
                    block_cbp   |= 0xF << (i << 2);
                    block_intra |= 1 << i;
                } else if (is_coded[i]) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                             first_block, s->dest[dst_idx] + off,
                                             (i & 4) ? s->uvlinesize : s->linesize,
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY),
                                             &block_tt);
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
                        ttmb = -1;
                    first_block = 0;
                }
            }
        } else { // skipped MB
            s->mb_intra                             = 0;
            s->current_picture.qscale_table[mb_pos] = 0;
            for (i = 0; i < 6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]]  = 0;
            }
            for (i = 0; i < 4; i++) {
                ff_vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
                ff_vc1_mc_4mv_luma(v, i, 0, 0);
            }
            ff_vc1_mc_4mv_chroma(v, 0);
            /* NOTE(review): redundant — qscale_table[mb_pos] was already zeroed
             * at the top of this branch */
            s->current_picture.qscale_table[mb_pos] = 0;
        }
    }
end:
    v->cbp[s->mb_x]      = block_cbp;
    v->ttblk[s->mb_x]    = block_tt;
    v->is_intra[s->mb_x] = block_intra;
    return 0;
}
/** Decode one macroblock of an interlaced-frame P picture
 *
 * Reads the MB mode (intra / 1MV / 2MV-field / 4MV / 4MV-field), decodes
 * motion data and residual blocks accordingly, and performs motion
 * compensation. blk_mv_type marks per-block whether field (1) or frame (0)
 * MVs are used; fieldtx selects field-interleaved block storage for intra
 * luma blocks.
 *
 * @return always 0
 */
static int vc1_decode_p_mb_intfr(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0;                   /* cbp decoding stuff */
    int mqdiff, mquant;            /* MB quantization */
    int ttmb = v->ttfrm;           /* MB Transform type */
    int mb_has_coeffs = 1;         /* last_flag */
    int dmv_x, dmv_y;              /* Differential MV components */
    int val;                       /* temp value */
    int first_block = 1;
    int dst_idx, off;
    int skipped, fourmv = 0, twomv = 0;
    int block_cbp = 0, pat, block_tt = 0;
    int idx_mbmode = 0, mvbp;
    int stride_y, fieldtx;

    mquant = v->pq; /* Lossy initialization */

    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];
    if (!skipped) {
        if (v->fourmvswitch)
            idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
        else
            idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
        switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
        /* store the motion vector type in a flag (useful later) */
        case MV_PMODE_INTFR_4MV:
            fourmv = 1;
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            break;
        case MV_PMODE_INTFR_4MV_FIELD:
            fourmv = 1;
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
            break;
        case MV_PMODE_INTFR_2MV_FIELD:
            twomv = 1;
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
            break;
        case MV_PMODE_INTFR_1MV:
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            break;
        }
        if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
            for (i = 0; i < 4; i++) {
                s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
                s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
            }
            v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
            s->mb_intra          = 1;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
            mb_has_coeffs = get_bits1(gb);
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same (not sure if necessary here) */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];
            dst_idx = 0;
            for (i = 0; i < 6; i++) {
                v->a_avail = v->c_avail          = 0;
                v->mb_type[0][s->block_index[i]] = 1;
                s->dc_val[0][s->block_index[i]]  = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                if (i == 2 || i == 3 || !s->first_slice_line)
                    v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                if (i == 1 || i == 3 || s->mb_x)
                    v->c_avail = v->mb_type[0][s->block_index[i] - 1];
                vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                       (i & 4) ? v->codingset2 : v->codingset);
                if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
                v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                if (i < 4) {
                    /* field transform: luma rows are interleaved per field */
                    stride_y = s->linesize << fieldtx;
                    off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
                } else {
                    stride_y = s->uvlinesize;
                    off = 0;
                }
                s->idsp.put_signed_pixels_clamped(s->block[i],
                                                  s->dest[dst_idx] + off,
                                                  stride_y);
                //TODO: loop filter
            }
        } else { // inter MB
            mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
                v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
            } else {
                if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
                    || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
                    v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
                }
            }
            s->mb_intra = v->is_intra[s->mb_x] = 0;
            for (i = 0; i < 6; i++)
                v->mb_type[0][s->block_index[i]] = 0;
            fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
            /* for all motion vector read MVDATA and motion compensate each block */
            dst_idx = 0;
            if (fourmv) {
                mvbp = v->fourmvbp;
                for (i = 0; i < 4; i++) {
                    dmv_x = dmv_y = 0;
                    if (mvbp & (8 >> i)) // MV present for this luma block?
                        get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                    ff_vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
                    ff_vc1_mc_4mv_luma(v, i, 0, 0);
                }
                ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
            } else if (twomv) {
                /* one MV per field: top field (blocks 0,1), bottom field (2,3) */
                mvbp  = v->twomvbp;
                dmv_x = dmv_y = 0;
                if (mvbp & 2) {
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                }
                ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
                ff_vc1_mc_4mv_luma(v, 0, 0, 0);
                ff_vc1_mc_4mv_luma(v, 1, 0, 0);
                dmv_x = dmv_y = 0;
                if (mvbp & 1) {
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                }
                ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
                ff_vc1_mc_4mv_luma(v, 2, 0, 0);
                ff_vc1_mc_4mv_luma(v, 3, 0, 0);
                ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
            } else { // 1MV
                mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
                dmv_x = dmv_y = 0;
                if (mvbp) {
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                }
                ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
                ff_vc1_mc_1mv(v, 0);
            }
            if (cbp)
                GET_MQUANT(); // p. 227
            s->current_picture.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && cbp)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                if (!fieldtx)
                    off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                else
                    off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
                if (val) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                             first_block, s->dest[dst_idx] + off,
                                             (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
                        ttmb = -1;
                    first_block = 0;
                }
            }
        }
    } else { // skipped
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        for (i = 0; i < 6; i++) {
            v->mb_type[0][s->block_index[i]] = 0;
            s->dc_val[0][s->block_index[i]]  = 0;
        }
        s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
        s->current_picture.qscale_table[mb_pos] = 0;
        v->blk_mv_type[s->block_index[0]] = 0;
        v->blk_mv_type[s->block_index[1]] = 0;
        v->blk_mv_type[s->block_index[2]] = 0;
        v->blk_mv_type[s->block_index[3]] = 0;
        ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
        ff_vc1_mc_1mv(v, 0);
    }
    /* at the end of a row, publish this row's intra flags for the next row */
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
    return 0;
}
/** Decode one macroblock of an interlaced-field P picture
 *
 * The MB mode index encodes intra (<= 1), 1-MV (<= 5) or 4-MV modes as
 * well as whether a CBP and/or MV differential follows. mb_off/blocks_off
 * offset table accesses into the current field.
 *
 * @return always 0
 */
static int vc1_decode_p_mb_intfi(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0;                   /* cbp decoding stuff */
    int mqdiff, mquant;            /* MB quantization */
    int ttmb = v->ttfrm;           /* MB Transform type */
    int mb_has_coeffs = 1;         /* last_flag */
    int dmv_x, dmv_y;              /* Differential MV components */
    int val;                       /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int pred_flag = 0;
    int block_cbp = 0, pat, block_tt = 0;
    int idx_mbmode = 0;

    mquant = v->pq; /* Lossy initialization */

    idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
    if (idx_mbmode <= 1) { // intra MB
        v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
        s->mb_intra          = 1;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
        s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
        GET_MQUANT();
        s->current_picture.qscale_table[mb_pos] = mquant;
        /* Set DC scale - y and c use the same (not sure if necessary here) */
        s->y_dc_scale = s->y_dc_scale_table[mquant];
        s->c_dc_scale = s->c_dc_scale_table[mquant];
        v->s.ac_pred  = v->acpred_plane[mb_pos] = get_bits1(gb);
        mb_has_coeffs = idx_mbmode & 1; // mode index low bit flags a coded CBP
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            v->a_avail = v->c_avail          = 0;
            v->mb_type[0][s->block_index[i]] = 1;
            s->dc_val[0][s->block_index[i]]  = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];
            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
            s->idsp.put_signed_pixels_clamped(s->block[i],
                                              s->dest[dst_idx] + off,
                                              (i & 4) ? s->uvlinesize
                                                      : s->linesize);
            // TODO: loop filter
        }
    } else {
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
        for (i = 0; i < 6; i++)
            v->mb_type[0][s->block_index[i]] = 0;
        if (idx_mbmode <= 5) { // 1-MV
            dmv_x = dmv_y = pred_flag = 0;
            if (idx_mbmode & 1) {
                get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
            }
            ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
            ff_vc1_mc_1mv(v, 0);
            mb_has_coeffs = !(idx_mbmode & 2);
        } else { // 4-MV
            v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
            for (i = 0; i < 4; i++) {
                dmv_x = dmv_y = pred_flag = 0;
                if (v->fourmvbp & (8 >> i))
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
                ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
                ff_vc1_mc_4mv_luma(v, i, 0, 0);
            }
            ff_vc1_mc_4mv_chroma(v, 0);
            mb_has_coeffs = idx_mbmode & 1;
        }
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        if (cbp) {
            GET_MQUANT();
        }
        s->current_picture.qscale_table[mb_pos] = mquant;
        if (!v->ttmbf && cbp) {
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
            if (val) {
                pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                         first_block, s->dest[dst_idx] + off,
                                         (i & 4) ? s->uvlinesize : s->linesize,
                                         (i & 4) && (s->flags & CODEC_FLAG_GRAY),
                                         &block_tt);
                block_cbp |= pat << (i << 2);
                if (!v->ttmbf && ttmb < 8)
                    ttmb = -1;
                first_block = 0;
            }
        }
    }
    /* at the end of a row, publish this row's intra flags for the next row */
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
    return 0;
}
/** Decode one B-frame MB (in Main profile)
 *
 * Parses the MB layer of a progressive B picture: DIRECTMB/SKIPMB flags
 * (raw bits or bitplane), motion vector data, BMVTYPE, CBPCY, MQUANT and
 * the transform type, then motion-compensates and decodes the residual
 * for all six blocks (4 luma + 2 chroma).
 *
 * NOTE: decoding order of the bitstream elements below is normative;
 * the GET_MVDATA()/GET_MQUANT() macros consume bits and write into the
 * locals declared here (index, index1, val, sign, mqdiff, mb_has_coeffs,
 * s->mb_intra), so statements must not be reordered.
 */
static void vc1_decode_b_mb(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0;              /* cbp decoding stuff */
    int mqdiff, mquant;       /* MB quantization; mqdiff is filled by GET_MQUANT() */
    int ttmb = v->ttfrm;      /* MB Transform type */
    int mb_has_coeffs = 0;    /* last_flag, set by GET_MVDATA() */
    int index, index1;        /* LUT indexes used inside GET_MVDATA() */
    int val, sign;            /* temp values used by the GET_* macros */
    int first_block = 1;
    int dst_idx, off;
    int skipped, direct;
    int dmv_x[2], dmv_y[2];   /* [0] = forward, [1] = backward MV differentials */
    int bmvtype = BMV_TYPE_BACKWARD;

    mquant = v->pq; /* lossy initialization */
    s->mb_intra = 0;

    /* DIRECTMB / SKIPMB: either one raw bit per MB or taken from the
     * previously decoded bitplane. */
    if (v->dmb_is_raw)
        direct = get_bits1(gb);
    else
        direct = v->direct_mb_plane[mb_pos];
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
    for (i = 0; i < 6; i++) {
        v->mb_type[0][s->block_index[i]] = 0;
        s->dc_val[0][s->block_index[i]] = 0;
    }
    s->current_picture.qscale_table[mb_pos] = 0;

    if (!direct) {
        if (!skipped) {
            /* Also sets s->mb_intra and mb_has_coeffs as side effects. */
            GET_MVDATA(dmv_x[0], dmv_y[0]);
            dmv_x[1] = dmv_x[0];
            dmv_y[1] = dmv_y[0];
        }
        if (skipped || !s->mb_intra) {
            /* BMVTYPE: the meaning of codes 0/1 swaps around BFRACTION = 1/2. */
            bmvtype = decode012(gb);
            switch (bmvtype) {
            case 0:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
                break;
            case 1:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                break;
            case 2:
                bmvtype = BMV_TYPE_INTERPOLATED;
                dmv_x[0] = dmv_y[0] = 0;
            }
        }
    }
    for (i = 0; i < 6; i++)
        v->mb_type[0][s->block_index[i]] = s->mb_intra;

    if (skipped) {
        /* Skipped MB: predict MVs and motion-compensate, no residual. */
        if (direct)
            bmvtype = BMV_TYPE_INTERPOLATED;
        ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
        return;
    }
    if (direct) {
        cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        GET_MQUANT();
        s->mb_intra = 0;
        s->current_picture.qscale_table[mb_pos] = mquant;
        if (!v->ttmbf)
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        /* Direct mode carries no MV differentials; prediction derives the MVs. */
        dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
        ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
    } else {
        if (!mb_has_coeffs && !s->mb_intra) {
            /* no coded blocks - effectively skipped */
            ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            return;
        }
        if (s->mb_intra && !mb_has_coeffs) {
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            s->ac_pred = get_bits1(gb);
            cbp = 0;
            ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        } else {
            if (bmvtype == BMV_TYPE_INTERPOLATED) {
                /* Second MV of an interpolated MB. */
                GET_MVDATA(dmv_x[0], dmv_y[0]);
                if (!mb_has_coeffs) {
                    /* interpolated skipped block */
                    ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
                    vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
                    return;
                }
            }
            ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            if (!s->mb_intra) {
                vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            }
            if (s->mb_intra)
                s->ac_pred = get_bits1(gb);
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
    }
    /* Residual decoding for all six blocks (0-3 luma, 4-5 chroma). */
    dst_idx = 0;
    for (i = 0; i < 6; i++) {
        s->dc_val[0][s->block_index[i]] = 0;
        dst_idx += i >> 2;                 /* 0 for luma, then 1, 2 for U, V planes */
        val = ((cbp >> (5 - i)) & 1);
        /* Pixel offset of the 8x8 block inside the macroblock destination. */
        off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
        v->mb_type[0][s->block_index[i]] = s->mb_intra;
        if (s->mb_intra) {
            /* check if prediction blocks A (above) and C (left) are available */
            v->a_avail = v->c_avail = 0;
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];
            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                continue;                 /* skip chroma reconstruction in gray mode */
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            if (v->rangeredfrm)
                for (j = 0; j < 64; j++)
                    s->block[i][j] <<= 1; /* range-reduction upscale */
            s->idsp.put_signed_pixels_clamped(s->block[i],
                                              s->dest[dst_idx] + off,
                                              i & 4 ? s->uvlinesize
                                                    : s->linesize);
        } else if (val) {
            vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                               first_block, s->dest[dst_idx] + off,
                               (i & 4) ? s->uvlinesize : s->linesize,
                               (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
            if (!v->ttmbf && ttmb < 8)
                ttmb = -1;                /* TTMB signalled per-MB only once */
            first_block = 0;
        }
    }
}
  1870. /** Decode one B-frame MB (in interlaced field B picture)
  1871. */
  1872. static void vc1_decode_b_mb_intfi(VC1Context *v)
  1873. {
  1874. MpegEncContext *s = &v->s;
  1875. GetBitContext *gb = &s->gb;
  1876. int i, j;
  1877. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1878. int cbp = 0; /* cbp decoding stuff */
  1879. int mqdiff, mquant; /* MB quantization */
  1880. int ttmb = v->ttfrm; /* MB Transform type */
  1881. int mb_has_coeffs = 0; /* last_flag */
  1882. int val; /* temp value */
  1883. int first_block = 1;
  1884. int dst_idx, off;
  1885. int fwd;
  1886. int dmv_x[2], dmv_y[2], pred_flag[2];
  1887. int bmvtype = BMV_TYPE_BACKWARD;
  1888. int idx_mbmode;
  1889. mquant = v->pq; /* Lossy initialization */
  1890. s->mb_intra = 0;
  1891. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  1892. if (idx_mbmode <= 1) { // intra MB
  1893. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
  1894. s->mb_intra = 1;
  1895. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1896. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1897. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1898. GET_MQUANT();
  1899. s->current_picture.qscale_table[mb_pos] = mquant;
  1900. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1901. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1902. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1903. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1904. mb_has_coeffs = idx_mbmode & 1;
  1905. if (mb_has_coeffs)
  1906. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1907. dst_idx = 0;
  1908. for (i = 0; i < 6; i++) {
  1909. v->a_avail = v->c_avail = 0;
  1910. v->mb_type[0][s->block_index[i]] = 1;
  1911. s->dc_val[0][s->block_index[i]] = 0;
  1912. dst_idx += i >> 2;
  1913. val = ((cbp >> (5 - i)) & 1);
  1914. if (i == 2 || i == 3 || !s->first_slice_line)
  1915. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1916. if (i == 1 || i == 3 || s->mb_x)
  1917. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1918. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1919. (i & 4) ? v->codingset2 : v->codingset);
  1920. if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
  1921. continue;
  1922. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1923. if (v->rangeredfrm)
  1924. for (j = 0; j < 64; j++)
  1925. s->block[i][j] <<= 1;
  1926. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1927. s->idsp.put_signed_pixels_clamped(s->block[i],
  1928. s->dest[dst_idx] + off,
  1929. (i & 4) ? s->uvlinesize
  1930. : s->linesize);
  1931. // TODO: yet to perform loop filter
  1932. }
  1933. } else {
  1934. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1935. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1936. for (i = 0; i < 6; i++)
  1937. v->mb_type[0][s->block_index[i]] = 0;
  1938. if (v->fmb_is_raw)
  1939. fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
  1940. else
  1941. fwd = v->forward_mb_plane[mb_pos];
  1942. if (idx_mbmode <= 5) { // 1-MV
  1943. int interpmvp = 0;
  1944. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1945. pred_flag[0] = pred_flag[1] = 0;
  1946. if (fwd)
  1947. bmvtype = BMV_TYPE_FORWARD;
  1948. else {
  1949. bmvtype = decode012(gb);
  1950. switch (bmvtype) {
  1951. case 0:
  1952. bmvtype = BMV_TYPE_BACKWARD;
  1953. break;
  1954. case 1:
  1955. bmvtype = BMV_TYPE_DIRECT;
  1956. break;
  1957. case 2:
  1958. bmvtype = BMV_TYPE_INTERPOLATED;
  1959. interpmvp = get_bits1(gb);
  1960. }
  1961. }
  1962. v->bmvtype = bmvtype;
  1963. if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
  1964. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1965. }
  1966. if (interpmvp) {
  1967. get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
  1968. }
  1969. if (bmvtype == BMV_TYPE_DIRECT) {
  1970. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  1971. dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
  1972. if (!s->next_picture_ptr->field_picture) {
  1973. av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
  1974. return;
  1975. }
  1976. }
  1977. ff_vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
  1978. vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
  1979. mb_has_coeffs = !(idx_mbmode & 2);
  1980. } else { // 4-MV
  1981. if (fwd)
  1982. bmvtype = BMV_TYPE_FORWARD;
  1983. v->bmvtype = bmvtype;
  1984. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1985. for (i = 0; i < 4; i++) {
  1986. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  1987. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  1988. if (v->fourmvbp & (8 >> i)) {
  1989. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
  1990. &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
  1991. &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1992. }
  1993. ff_vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
  1994. ff_vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
  1995. }
  1996. ff_vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
  1997. mb_has_coeffs = idx_mbmode & 1;
  1998. }
  1999. if (mb_has_coeffs)
  2000. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2001. if (cbp) {
  2002. GET_MQUANT();
  2003. }
  2004. s->current_picture.qscale_table[mb_pos] = mquant;
  2005. if (!v->ttmbf && cbp) {
  2006. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  2007. }
  2008. dst_idx = 0;
  2009. for (i = 0; i < 6; i++) {
  2010. s->dc_val[0][s->block_index[i]] = 0;
  2011. dst_idx += i >> 2;
  2012. val = ((cbp >> (5 - i)) & 1);
  2013. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  2014. if (val) {
  2015. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2016. first_block, s->dest[dst_idx] + off,
  2017. (i & 4) ? s->uvlinesize : s->linesize,
  2018. (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
  2019. if (!v->ttmbf && ttmb < 8)
  2020. ttmb = -1;
  2021. first_block = 0;
  2022. }
  2023. }
  2024. }
  2025. }
/** Decode one B-frame MB (in interlaced frame B picture)
 *
 * Handles skipped, direct, intra and inter macroblocks of an interlaced
 * frame B picture: derives direct-mode MVs by scaling the co-located MVs
 * of the next picture with BFRACTION, parses MBMODE/DIRECTMB/MVSW and the
 * per-block MV data (1-MV, 2-MV field, 4-MV interpolated), then decodes
 * residuals. Returns 0 (no error paths currently return non-zero).
 *
 * Bitstream element order is normative; GET_MQUANT() and
 * get_mvdata_interlaced() consume bits as side effects, so statements
 * must not be reordered.
 */
static int vc1_decode_b_mb_intfr(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0;              /* cbp decoding stuff */
    int mqdiff, mquant;       /* MB quantization; mqdiff filled by GET_MQUANT() */
    int ttmb = v->ttfrm;      /* MB Transform type */
    int mvsw = 0;             /* motion vector switch */
    int mb_has_coeffs = 1;    /* last_flag */
    int dmv_x, dmv_y;         /* Differential MV components */
    int val;                  /* temp value */
    int first_block = 1;
    int dst_idx, off;
    int skipped, direct, twomv = 0;
    int block_cbp = 0, pat, block_tt = 0;
    int idx_mbmode = 0, mvbp;
    int stride_y, fieldtx;
    int bmvtype = BMV_TYPE_BACKWARD;
    int dir, dir2;

    mquant = v->pq; /* Lossy initialization */
    s->mb_intra = 0;
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];
    if (!skipped) {
        idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
        /* 2-MV field mode uses one MV per field pair; mark luma blocks. */
        if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
            twomv = 1;
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
        } else {
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
        }
    }
    if (v->dmb_is_raw)
        direct = get_bits1(gb);
    else
        direct = v->direct_mb_plane[mb_pos];
    if (direct) {
        /* Direct mode: scale the co-located next-picture MVs by BFRACTION
         * for the forward (dir 0) and backward (dir 1) directions. */
        if (s->next_picture_ptr->field_picture)
            av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
        s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
        s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
        s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
        s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
        if (twomv) {
            /* Second field MV from block 2; blocks 1 and 3 copy their
             * left neighbours (0 and 2). */
            s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
            s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
            s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
            s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
            for (i = 1; i < 4; i += 2) {
                s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
                s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
                s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
                s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
            }
        } else {
            /* 1-MV: replicate block 0's MVs to blocks 1..3. */
            for (i = 1; i < 4; i++) {
                s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
                s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
                s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
                s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
            }
        }
    }
    if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
        for (i = 0; i < 4; i++) {
            s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
            s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
            s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
            s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
        }
        v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
        s->mb_intra = 1;
        s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
        fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
        mb_has_coeffs = get_bits1(gb);
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
        GET_MQUANT();
        s->current_picture.qscale_table[mb_pos] = mquant;
        /* Set DC scale - y and c use the same (not sure if necessary here) */
        s->y_dc_scale = s->y_dc_scale_table[mquant];
        s->c_dc_scale = s->c_dc_scale_table[mquant];
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            v->a_avail = v->c_avail = 0;
            v->mb_type[0][s->block_index[i]] = 1;
            s->dc_val[0][s->block_index[i]] = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];
            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            if (i < 4) {
                /* FIELDTX interleaves the two fields: doubled luma stride
                 * and a different block offset within the MB. */
                stride_y = s->linesize << fieldtx;
                off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
            } else {
                stride_y = s->uvlinesize;
                off = 0;
            }
            s->idsp.put_signed_pixels_clamped(s->block[i],
                                              s->dest[dst_idx] + off,
                                              stride_y);
        }
    } else {
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        if (!direct) {
            if (skipped || !s->mb_intra) {
                /* BMVTYPE: meaning of codes 0/1 swaps around BFRACTION = 1/2. */
                bmvtype = decode012(gb);
                switch (bmvtype) {
                case 0:
                    bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
                    break;
                case 1:
                    bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                    break;
                case 2:
                    bmvtype = BMV_TYPE_INTERPOLATED;
                }
            }
            if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
                mvsw = get_bits1(gb);
        }
        if (!skipped) { // inter MB
            mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            if (!direct) {
                if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
                    v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
                } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
                    v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
                }
            }
            for (i = 0; i < 6; i++)
                v->mb_type[0][s->block_index[i]] = 0;
            fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
            /* for all motion vector read MVDATA and motion compensate each block */
            dst_idx = 0;
            if (direct) {
                if (twomv) {
                    for (i = 0; i < 4; i++) {
                        ff_vc1_mc_4mv_luma(v, i, 0, 0);
                        ff_vc1_mc_4mv_luma(v, i, 1, 1);
                    }
                    ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
                    ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
                } else {
                    ff_vc1_mc_1mv(v, 0);
                    ff_vc1_interp_mc(v);
                }
            } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
                /* 4 field MVs: i = 0,1 forward fields, i = 2,3 backward. */
                mvbp = v->fourmvbp;
                for (i = 0; i < 4; i++) {
                    dir = i==1 || i==3;
                    dmv_x = dmv_y = 0;
                    val = ((mvbp >> (3 - i)) & 1);
                    if (val)
                        get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                    j = i > 1 ? 2 : 0;
                    ff_vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
                    ff_vc1_mc_4mv_luma(v, j, dir, dir);
                    ff_vc1_mc_4mv_luma(v, j+1, dir, dir);
                }
                ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
                ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
            } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
                /* One forward MV then one backward MV. */
                mvbp = v->twomvbp;
                dmv_x = dmv_y = 0;
                if (mvbp & 2)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
                ff_vc1_mc_1mv(v, 0);
                dmv_x = dmv_y = 0;
                if (mvbp & 1)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
                ff_vc1_interp_mc(v);
            } else if (twomv) {
                /* Two field MVs; MVSW flips the direction of the bottom field. */
                dir = bmvtype == BMV_TYPE_BACKWARD;
                dir2 = dir;
                if (mvsw)
                    dir2 = !dir;
                mvbp = v->twomvbp;
                dmv_x = dmv_y = 0;
                if (mvbp & 2)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
                dmv_x = dmv_y = 0;
                if (mvbp & 1)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
                if (mvsw) {
                    /* Swap the two directions' field MVs between top and
                     * bottom field blocks. */
                    for (i = 0; i < 2; i++) {
                        s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
                        s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
                        s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
                        s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
                    }
                } else {
                    ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
                    ff_vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
                }
                ff_vc1_mc_4mv_luma(v, 0, dir, 0);
                ff_vc1_mc_4mv_luma(v, 1, dir, 0);
                ff_vc1_mc_4mv_luma(v, 2, dir2, 0);
                ff_vc1_mc_4mv_luma(v, 3, dir2, 0);
                ff_vc1_mc_4mv_chroma4(v, dir, dir2, 0);
            } else {
                /* Plain 1-MV forward or backward. */
                dir = bmvtype == BMV_TYPE_BACKWARD;
                mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
                dmv_x = dmv_y = 0;
                if (mvbp)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
                v->blk_mv_type[s->block_index[0]] = 1;
                v->blk_mv_type[s->block_index[1]] = 1;
                v->blk_mv_type[s->block_index[2]] = 1;
                v->blk_mv_type[s->block_index[3]] = 1;
                ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
                for (i = 0; i < 2; i++) {
                    s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
                    s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
                }
                ff_vc1_mc_1mv(v, dir);
            }
            if (cbp)
                GET_MQUANT(); // p. 227
            s->current_picture.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && cbp)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                if (!fieldtx)
                    off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                else
                    off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
                if (val) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                             first_block, s->dest[dst_idx] + off,
                                             (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
                        ttmb = -1;
                    first_block = 0;
                }
            }
        } else { // skipped
            dir = 0;
            for (i = 0; i < 6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]] = 0;
            }
            s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
            s->current_picture.qscale_table[mb_pos] = 0;
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            if (!direct) {
                if (bmvtype == BMV_TYPE_INTERPOLATED) {
                    ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
                    ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
                } else {
                    dir = bmvtype == BMV_TYPE_BACKWARD;
                    ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
                    if (mvsw) {
                        /* NOTE(review): the inner "if (mvsw)" below is
                         * redundant (we are already inside "if (mvsw)"),
                         * so dir2 is always !dir here. */
                        int dir2 = dir;
                        if (mvsw)
                            dir2 = !dir;
                        for (i = 0; i < 2; i++) {
                            s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
                            s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
                            s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
                            s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
                        }
                    } else {
                        v->blk_mv_type[s->block_index[0]] = 1;
                        v->blk_mv_type[s->block_index[1]] = 1;
                        v->blk_mv_type[s->block_index[2]] = 1;
                        v->blk_mv_type[s->block_index[3]] = 1;
                        ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
                        for (i = 0; i < 2; i++) {
                            s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
                            s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
                        }
                    }
                }
            }
            ff_vc1_mc_1mv(v, dir);
            if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
                ff_vc1_interp_mc(v);
            }
        }
    }
    /* At the end of an MB row, roll the per-row intra flags into the base
     * buffer for the next row's prediction. */
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
    v->cbp[s->mb_x] = block_cbp;
    v->ttblk[s->mb_x] = block_tt;
    return 0;
}
/** Decode blocks of I-frame
 *
 * Simple/Main profile I-picture decoding: selects the intra AC coding
 * sets from the Y/C AC table indexes, then for every macroblock reads
 * CBPCY and ACPRED, decodes the six intra blocks with coded-block
 * prediction, reconstructs the pixels, and applies the overlap filter
 * (pq >= 9) and optional loop filter.
 */
static void vc1_decode_i_blocks(VC1Context *v)
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    //do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < v->end_mb_x; s->mb_x++) {
            uint8_t *dst[6];
            ff_update_block_index(s);
            /* Destination pointers for the four 8x8 luma blocks and U, V. */
            dst[0] = s->dest[0];
            dst[1] = dst[0] + 8;
            dst[2] = s->dest[0] + s->linesize * 8;
            dst[3] = dst[2] + 8;
            dst[4] = s->dest[1];
            dst[5] = s->dest[2];
            s->bdsp.clear_blocks(s->block[0]);
            /* NOTE(review): mb_pos here is computed with s->mb_width while
             * every other function in this file uses s->mb_stride — confirm
             * this is intended for the non-advanced-profile tables. */
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.qscale_table[mb_pos] = v->pq;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            v->s.ac_pred = get_bits1(&v->s.gb);

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);
                if (k < 4) {
                    /* Luma coded-block flags are predicted from neighbours;
                     * the transmitted bit is the XOR with the prediction. */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);
                vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
                if (v->pq >= 9 && v->overlap) {
                    /* Overlap smoothing operates on signed pixel values. */
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] <<= 1;
                    s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
                                                      k & 4 ? s->uvlinesize
                                                            : s->linesize);
                } else {
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] = (s->block[k][j] - 64) << 1;
                    s->idsp.put_pixels_clamped(s->block[k], dst[k],
                                               k & 4 ? s->uvlinesize
                                                     : s->linesize);
                }
            }

            if (v->pq >= 9 && v->overlap) {
                /* Horizontal overlap with the MB to the left, then between
                 * this MB's own block columns. */
                if (s->mb_x) {
                    v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                /* Vertical overlap with the MB row above, then between
                 * this MB's own block rows. */
                if (!s->first_slice_line) {
                    v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            }
            if (v->s.loop_filter)
                ff_vc1_loop_filter_iblk(v, v->pq);

            if (get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        /* With the loop filter enabled a row can only be emitted once the
         * next row has been decoded, hence the one-row delay. */
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);

    /* This is intentionally mb_height and not end_mb_y - unlike in advanced
     * profile, these only differ are when decoding MSS2 rectangles. */
    ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
}
/** Decode blocks of I-frame for advanced profile
 *
 * Decodes every macroblock of an advanced-profile I picture (or one slice of
 * it, from start_mb_y to end_mb_y), applying the overlap smoothing filter and
 * the optional in-loop deblocking filter as it goes.
 */
static void vc1_decode_i_blocks_adv(VC1Context *v)
{
    int k;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;
    int mquant = v->pq;           /* read and updated by the GET_MQUANT() macro */
    int mqdiff;                   /* scratch used by the GET_MQUANT() macro */
    GetBitContext *gb = &s->gb;   /* scratch used by the GET_MQUANT() macro */

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    // do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    s->mb_y = s->start_mb_y;
    if (s->start_mb_y) {
        /* Slice start: zero the coded-block flags of the row above so
         * coded_block prediction cannot leak across the slice boundary. */
        s->mb_x = 0;
        init_block_index(v);
        memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
               (1 + s->b8_stride) * sizeof(*s->coded_block));
    }
    for (; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            int16_t (*block)[64] = v->block[v->cur_blk_idx];

            ff_update_block_index(s);
            s->bdsp.clear_blocks(block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
            /* I frames carry no motion: publish zero MVs for later B-frame use */
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;

            // do actual MB decoding and displaying
            if (v->fieldtx_is_raw)
                v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            /* AC prediction flag: raw from the bitstream or from the bitplane */
            if (v->acpred_is_raw)
                v->s.ac_pred = get_bits1(&v->s.gb);
            else
                v->s.ac_pred = v->acpred_plane[mb_pos];

            if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
                v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);

            GET_MQUANT();

            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];

            for (k = 0; k < 6; k++) {
                /* coded flag for block k, taken from the CBP bitfield */
                val = ((cbp >> (5 - k)) & 1);
                if (k < 4) {
                    /* luma blocks: the coded flag is coded predictively */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);
                /* top/left neighbour availability for DC/AC prediction */
                v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
                v->c_avail = !!s->mb_x || (k == 1 || k == 3);
                vc1_decode_i_block_adv(v, block[k], k, val,
                                       (k < 4) ? v->codingset : v->codingset2, mquant);
                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue; /* skip chroma transforms in grayscale mode */
                v->vc1dsp.vc1_inv_trans_8x8(block[k]);
            }

            ff_vc1_smooth_overlap_filter_iblk(v);
            vc1_put_signed_blocks_clamped(v);
            if (v->s.loop_filter)
                ff_vc1_loop_filter_iblk_delayed(v, v->pq);

            if (get_bits_count(&s->gb) > v->bits) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            /* loop filter lags one row, so draw the previous row */
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }

    /* raw bottom MB row: flush the last (delayed) row of blocks */
    s->mb_x = 0;
    init_block_index(v);
    for (; s->mb_x < s->mb_width; s->mb_x++) {
        ff_update_block_index(s);
        vc1_put_signed_blocks_clamped(v);
        if (v->s.loop_filter)
            ff_vc1_loop_filter_iblk_delayed(v, v->pq);
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
/** Decode blocks of a P-frame (progressive, interlaced-frame or
 *  interlaced-field coding), then run the deferred in-loop deblocking pass.
 */
static void vc1_decode_p_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int apply_loop_filter;

    /* select codingmode used for VLC tables selection */
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* The per-MB loop-filter path below is only taken for progressive
     * pictures that have the filter enabled and not discarded. */
    apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
                        v->fcm == PROGRESSIVE;
    s->first_slice_line = 1;
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            /* dispatch on frame coding mode */
            if (v->fcm == ILACE_FIELD)
                vc1_decode_p_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_p_mb_intfr(v);
            else vc1_decode_p_mb(v);

            /* the filter is applied one row behind decoding (skipped on the
             * first row; the last row is filtered after the main loop) */
            if (s->mb_y != s->start_mb_y && apply_loop_filter)
                ff_vc1_apply_p_loop_filter(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
        }
        /* shift this row's side data (cbp, transform type, intra flags,
         * luma MVs) down so the next row sees it as "previous row" context */
        memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
        memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
        memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
        if (s->mb_y != s->start_mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (apply_loop_filter) {
        /* filter the final row, which the lagged in-loop pass above skipped */
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            ff_vc1_apply_p_loop_filter(v);
        }
    }
    if (s->end_mb_y >= s->start_mb_y)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
  2671. static void vc1_decode_b_blocks(VC1Context *v)
  2672. {
  2673. MpegEncContext *s = &v->s;
  2674. /* select codingmode used for VLC tables selection */
  2675. switch (v->c_ac_table_index) {
  2676. case 0:
  2677. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2678. break;
  2679. case 1:
  2680. v->codingset = CS_HIGH_MOT_INTRA;
  2681. break;
  2682. case 2:
  2683. v->codingset = CS_MID_RATE_INTRA;
  2684. break;
  2685. }
  2686. switch (v->c_ac_table_index) {
  2687. case 0:
  2688. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2689. break;
  2690. case 1:
  2691. v->codingset2 = CS_HIGH_MOT_INTER;
  2692. break;
  2693. case 2:
  2694. v->codingset2 = CS_MID_RATE_INTER;
  2695. break;
  2696. }
  2697. s->first_slice_line = 1;
  2698. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2699. s->mb_x = 0;
  2700. init_block_index(v);
  2701. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2702. ff_update_block_index(s);
  2703. if (v->fcm == ILACE_FIELD)
  2704. vc1_decode_b_mb_intfi(v);
  2705. else if (v->fcm == ILACE_FRAME)
  2706. vc1_decode_b_mb_intfr(v);
  2707. else
  2708. vc1_decode_b_mb(v);
  2709. if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
  2710. // TODO: may need modification to handle slice coding
  2711. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2712. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2713. get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
  2714. return;
  2715. }
  2716. if (v->s.loop_filter)
  2717. ff_vc1_loop_filter_iblk(v, v->pq);
  2718. }
  2719. if (!v->s.loop_filter)
  2720. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2721. else if (s->mb_y)
  2722. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2723. s->first_slice_line = 0;
  2724. }
  2725. if (v->s.loop_filter)
  2726. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2727. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2728. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2729. }
  2730. static void vc1_decode_skip_blocks(VC1Context *v)
  2731. {
  2732. MpegEncContext *s = &v->s;
  2733. if (!v->s.last_picture.f->data[0])
  2734. return;
  2735. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
  2736. s->first_slice_line = 1;
  2737. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2738. s->mb_x = 0;
  2739. init_block_index(v);
  2740. ff_update_block_index(s);
  2741. memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
  2742. memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2743. memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2744. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2745. s->first_slice_line = 0;
  2746. }
  2747. s->pict_type = AV_PICTURE_TYPE_P;
  2748. }
  2749. void ff_vc1_decode_blocks(VC1Context *v)
  2750. {
  2751. v->s.esc3_level_length = 0;
  2752. if (v->x8_type) {
  2753. ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
  2754. } else {
  2755. v->cur_blk_idx = 0;
  2756. v->left_blk_idx = -1;
  2757. v->topleft_blk_idx = 1;
  2758. v->top_blk_idx = 2;
  2759. switch (v->s.pict_type) {
  2760. case AV_PICTURE_TYPE_I:
  2761. if (v->profile == PROFILE_ADVANCED)
  2762. vc1_decode_i_blocks_adv(v);
  2763. else
  2764. vc1_decode_i_blocks(v);
  2765. break;
  2766. case AV_PICTURE_TYPE_P:
  2767. if (v->p_frame_skipped)
  2768. vc1_decode_skip_blocks(v);
  2769. else
  2770. vc1_decode_p_blocks(v);
  2771. break;
  2772. case AV_PICTURE_TYPE_B:
  2773. if (v->bi_type) {
  2774. if (v->profile == PROFILE_ADVANCED)
  2775. vc1_decode_i_blocks_adv(v);
  2776. else
  2777. vc1_decode_i_blocks(v);
  2778. } else
  2779. vc1_decode_b_blocks(v);
  2780. break;
  2781. }
  2782. }
  2783. }