  1. /*
  2. * VC-1 and WMV3 decoder
  3. * Copyright (c) 2011 Mashiat Sarker Shakkhar
  4. * Copyright (c) 2006-2007 Konstantin Shishkov
  5. * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  6. *
  7. * This file is part of Libav.
  8. *
  9. * Libav is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * Libav is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with Libav; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * VC-1 and WMV3 block decoding routines
  26. */
  27. #include "avcodec.h"
  28. #include "mpegutils.h"
  29. #include "mpegvideo.h"
  30. #include "msmpeg4data.h"
  31. #include "unary.h"
  32. #include "vc1.h"
  33. #include "vc1_pred.h"
  34. #include "vc1acdata.h"
  35. #include "vc1data.h"
  36. #define MB_INTRA_VLC_BITS 9
  37. #define DC_VLC_BITS 9
  38. // offset tables for interlaced picture MVDATA decoding
  39. static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
  40. static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
  41. /***********************************************************************/
  42. /**
  43. * @name VC-1 Bitplane decoding
  44. * @see 8.7, p56
  45. * @{
  46. */
  47. /**
  48. * Imode types
  49. * @{
  50. */
  51. enum Imode {
  52. IMODE_RAW,
  53. IMODE_NORM2,
  54. IMODE_DIFF2,
  55. IMODE_NORM6,
  56. IMODE_DIFF6,
  57. IMODE_ROWSKIP,
  58. IMODE_COLSKIP
  59. };
  60. /** @} */ //imode defines
  61. static void init_block_index(VC1Context *v)
  62. {
  63. MpegEncContext *s = &v->s;
  64. ff_init_block_index(s);
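/* The offset below appears to select the bottom field of an interleaved
 * field picture: !(second_field ^ tff) is true exactly when the field being
 * decoded is the bottom one, so the destination pointers are advanced by
 * one frame line. */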
  65. if (v->field_mode && !(v->second_field ^ v->tff)) {
  66. s->dest[0] += s->current_picture_ptr->f->linesize[0];
  67. s->dest[1] += s->current_picture_ptr->f->linesize[1];
  68. s->dest[2] += s->current_picture_ptr->f->linesize[2];
  69. }
  70. }
  71. /** @} */ //Bitplane group
  72. static void vc1_put_signed_blocks_clamped(VC1Context *v)
  73. {
  74. MpegEncContext *s = &v->s;
  75. int topleft_mb_pos, top_mb_pos;
  76. int stride_y, fieldtx = 0;
  77. int v_dist;
  78. /* The put pixels loop is always one MB row behind the decoding loop,
  79. * because we can only put pixels when overlap filtering is done, and
  80. * for filtering of the bottom edge of a MB, we need the next MB row
  81. * present as well.
  82. * Within the row, the put pixels loop is also one MB col behind the
  83. * decoding loop. The reason for this is again, because for filtering
  84. * of the right MB edge, we need the next MB present. */
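/* This one-row/one-column delay is what the rotating block buffers implement:
 * cur/left/top/topleft_blk_idx are cycled through v->n_allocated_blks by
 * inc_blk_idx() at the end of this function, so the "topleft" buffer holds
 * coefficients decoded one row and one column earlier. */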
  85. if (!s->first_slice_line) {
  86. if (s->mb_x) {
  87. topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
  88. if (v->fcm == ILACE_FRAME)
  89. fieldtx = v->fieldtx_plane[topleft_mb_pos];
  90. stride_y = s->linesize << fieldtx;
  91. v_dist = (16 - fieldtx) >> (fieldtx == 0);
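/* i.e. v_dist = fieldtx ? 15 : 8, the same value written out explicitly in
 * the right-edge case below */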
  92. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
  93. s->dest[0] - 16 * s->linesize - 16,
  94. stride_y);
  95. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
  96. s->dest[0] - 16 * s->linesize - 8,
  97. stride_y);
  98. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
  99. s->dest[0] - v_dist * s->linesize - 16,
  100. stride_y);
  101. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
  102. s->dest[0] - v_dist * s->linesize - 8,
  103. stride_y);
  104. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
  105. s->dest[1] - 8 * s->uvlinesize - 8,
  106. s->uvlinesize);
  107. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
  108. s->dest[2] - 8 * s->uvlinesize - 8,
  109. s->uvlinesize);
  110. }
  111. if (s->mb_x == s->mb_width - 1) {
  112. top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
  113. if (v->fcm == ILACE_FRAME)
  114. fieldtx = v->fieldtx_plane[top_mb_pos];
  115. stride_y = s->linesize << fieldtx;
  116. v_dist = fieldtx ? 15 : 8;
  117. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
  118. s->dest[0] - 16 * s->linesize,
  119. stride_y);
  120. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
  121. s->dest[0] - 16 * s->linesize + 8,
  122. stride_y);
  123. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
  124. s->dest[0] - v_dist * s->linesize,
  125. stride_y);
  126. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
  127. s->dest[0] - v_dist * s->linesize + 8,
  128. stride_y);
  129. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
  130. s->dest[1] - 8 * s->uvlinesize,
  131. s->uvlinesize);
  132. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
  133. s->dest[2] - 8 * s->uvlinesize,
  134. s->uvlinesize);
  135. }
  136. }
  137. #define inc_blk_idx(idx) do { \
  138. idx++; \
  139. if (idx >= v->n_allocated_blks) \
  140. idx = 0; \
  141. } while (0)
  142. inc_blk_idx(v->topleft_blk_idx);
  143. inc_blk_idx(v->top_blk_idx);
  144. inc_blk_idx(v->left_blk_idx);
  145. inc_blk_idx(v->cur_blk_idx);
  146. }
  147. /***********************************************************************/
  148. /**
  149. * @name VC-1 Block-level functions
  150. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  151. * @{
  152. */
  153. /**
  154. * @def GET_MQUANT
  155. * @brief Get macroblock-level quantizer scale
  156. */
  157. #define GET_MQUANT() \
  158. if (v->dquantfrm) { \
  159. int edges = 0; \
  160. if (v->dqprofile == DQPROFILE_ALL_MBS) { \
  161. if (v->dqbilevel) { \
  162. mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
  163. } else { \
  164. mqdiff = get_bits(gb, 3); \
  165. if (mqdiff != 7) \
  166. mquant = v->pq + mqdiff; \
  167. else \
  168. mquant = get_bits(gb, 5); \
  169. } \
  170. } \
  171. if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
  172. edges = 1 << v->dqsbedge; \
  173. else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
  174. edges = (3 << v->dqsbedge) % 15; \
  175. else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
  176. edges = 15; \
  177. if ((edges&1) && !s->mb_x) \
  178. mquant = v->altpq; \
  179. if ((edges&2) && s->first_slice_line) \
  180. mquant = v->altpq; \
  181. if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
  182. mquant = v->altpq; \
  183. if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
  184. mquant = v->altpq; \
  185. if (!mquant || mquant > 31) { \
  186. av_log(v->s.avctx, AV_LOG_ERROR, \
  187. "Overriding invalid mquant %d\n", mquant); \
  188. mquant = 1; \
  189. } \
  190. }
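/* Worked example: with DQPROFILE_SINGLE_EDGE and dqsbedge == 2, edges becomes
 * 1 << 2 = 4, so only macroblocks in the rightmost column
 * (mb_x == mb_width - 1) switch to the alternative quantizer v->altpq;
 * DQPROFILE_FOUR_EDGES sets all four bits, so every boundary macroblock
 * uses it. */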
  191. /**
  192. * @def GET_MVDATA(_dmv_x, _dmv_y)
  193. * @brief Get MV differentials
  194. * @see MVDATA decoding from 8.3.5.2, p(1)20
  195. * @param _dmv_x Horizontal differential for decoded MV
  196. * @param _dmv_y Vertical differential for decoded MV
  197. */
  198. #define GET_MVDATA(_dmv_x, _dmv_y) \
  199. index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
  200. VC1_MV_DIFF_VLC_BITS, 2); \
  201. if (index > 36) { \
  202. mb_has_coeffs = 1; \
  203. index -= 37; \
  204. } else \
  205. mb_has_coeffs = 0; \
  206. s->mb_intra = 0; \
  207. if (!index) { \
  208. _dmv_x = _dmv_y = 0; \
  209. } else if (index == 35) { \
  210. _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
  211. _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
  212. } else if (index == 36) { \
  213. _dmv_x = 0; \
  214. _dmv_y = 0; \
  215. s->mb_intra = 1; \
  216. } else { \
  217. index1 = index % 6; \
  218. if (!s->quarter_sample && index1 == 5) val = 1; \
  219. else val = 0; \
  220. if (size_table[index1] - val > 0) \
  221. val = get_bits(gb, size_table[index1] - val); \
  222. else val = 0; \
  223. sign = 0 - (val&1); \
  224. _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
  225. \
  226. index1 = index / 6; \
  227. if (!s->quarter_sample && index1 == 5) val = 1; \
  228. else val = 0; \
  229. if (size_table[index1] - val > 0) \
  230. val = get_bits(gb, size_table[index1] - val); \
  231. else val = 0; \
  232. sign = 0 - (val & 1); \
  233. _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
  234. }
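/* The sign trick used above: sign = 0 - (val & 1) is either 0 or -1 (all
 * ones), so (sign ^ magnitude) - sign yields +magnitude when the low bit of
 * val is 0 and -magnitude when it is 1, e.g. val = 5 gives
 * -((5 >> 1) + offset_table[index1]). */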
  235. static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
  236. int *dmv_y, int *pred_flag)
  237. {
  238. int index, index1;
  239. int extend_x = 0, extend_y = 0;
  240. GetBitContext *gb = &v->s.gb;
  241. int bits, esc;
  242. int val, sign;
  243. const int* offs_tab;
  244. if (v->numref) {
  245. bits = VC1_2REF_MVDATA_VLC_BITS;
  246. esc = 125;
  247. } else {
  248. bits = VC1_1REF_MVDATA_VLC_BITS;
  249. esc = 71;
  250. }
  251. switch (v->dmvrange) {
  252. case 1:
  253. extend_x = 1;
  254. break;
  255. case 2:
  256. extend_y = 1;
  257. break;
  258. case 3:
  259. extend_x = extend_y = 1;
  260. break;
  261. }
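/* The joint MV VLC read below codes both components in one index: unless the
 * escape value is hit, (index + 1) % 9 selects the horizontal magnitude class
 * and (index + 1) / 9 the vertical one, each then refined with extra bits. */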
  262. index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
  263. if (index == esc) {
  264. *dmv_x = get_bits(gb, v->k_x);
  265. *dmv_y = get_bits(gb, v->k_y);
  266. if (v->numref) {
  267. if (pred_flag) {
  268. *pred_flag = *dmv_y & 1;
  269. *dmv_y = (*dmv_y + *pred_flag) >> 1;
  270. } else {
  271. *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
  272. }
  273. }
  274. }
  275. else {
  276. if (extend_x)
  277. offs_tab = offset_table2;
  278. else
  279. offs_tab = offset_table1;
  280. index1 = (index + 1) % 9;
  281. if (index1 != 0) {
  282. val = get_bits(gb, index1 + extend_x);
  283. sign = 0 -(val & 1);
  284. *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
  285. } else
  286. *dmv_x = 0;
  287. if (extend_y)
  288. offs_tab = offset_table2;
  289. else
  290. offs_tab = offset_table1;
  291. index1 = (index + 1) / 9;
  292. if (index1 > v->numref) {
  293. val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
  294. sign = 0 - (val & 1);
  295. *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
  296. } else
  297. *dmv_y = 0;
  298. if (v->numref && pred_flag)
  299. *pred_flag = index1 & 1;
  300. }
  301. }
  302. /** Reconstruct motion vector for B-frame and do motion compensation
  303. */
  304. static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
  305. int direct, int mode)
  306. {
  307. if (direct) {
  308. ff_vc1_mc_1mv(v, 0);
  309. ff_vc1_interp_mc(v);
  310. return;
  311. }
  312. if (mode == BMV_TYPE_INTERPOLATED) {
  313. ff_vc1_mc_1mv(v, 0);
  314. ff_vc1_interp_mc(v);
  315. return;
  316. }
  317. ff_vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
  318. }
  319. /** Get predicted DC value for I-frames only
  320. * prediction dir: left=0, top=1
  321. * @param s MpegEncContext
  322. * @param overlap flag indicating that overlap filtering is used
  323. * @param pq integer part of picture quantizer
  324. * @param[in] n block index in the current MB
  325. * @param dc_val_ptr Pointer to DC predictor
  326. * @param dir_ptr Prediction direction for use in AC prediction
  327. */
  328. static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  329. int16_t **dc_val_ptr, int *dir_ptr)
  330. {
  331. int a, b, c, wrap, pred, scale;
  332. int16_t *dc_val;
  333. static const uint16_t dcpred[32] = {
  334. -1, 1024, 512, 341, 256, 205, 171, 146, 128,
  335. 114, 102, 93, 85, 79, 73, 68, 64,
  336. 60, 57, 54, 51, 49, 47, 45, 43,
  337. 41, 39, 38, 37, 35, 34, 33
  338. };
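/* dcpred[s] is 1024 / s rounded to the nearest integer, i.e. the default DC
 * predictor 1024 expressed in units of DC scale s. */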
  339. /* find prediction - wmv3_dc_scale always used here in fact */
  340. if (n < 4) scale = s->y_dc_scale;
  341. else scale = s->c_dc_scale;
  342. wrap = s->block_wrap[n];
  343. dc_val = s->dc_val[0] + s->block_index[n];
  344. /* B A
  345. * C X
  346. */
  347. c = dc_val[ - 1];
  348. b = dc_val[ - 1 - wrap];
  349. a = dc_val[ - wrap];
  350. if (pq < 9 || !overlap) {
  351. /* Set outer values */
  352. if (s->first_slice_line && (n != 2 && n != 3))
  353. b = a = dcpred[scale];
  354. if (s->mb_x == 0 && (n != 1 && n != 3))
  355. b = c = dcpred[scale];
  356. } else {
  357. /* Set outer values */
  358. if (s->first_slice_line && (n != 2 && n != 3))
  359. b = a = 0;
  360. if (s->mb_x == 0 && (n != 1 && n != 3))
  361. b = c = 0;
  362. }
  363. if (abs(a - b) <= abs(b - c)) {
  364. pred = c;
  365. *dir_ptr = 1; // left
  366. } else {
  367. pred = a;
  368. *dir_ptr = 0; // top
  369. }
  370. /* update predictor */
  371. *dc_val_ptr = &dc_val[0];
  372. return pred;
  373. }
  374. /** Get predicted DC value
  375. * prediction dir: left=0, top=1
  376. * @param s MpegEncContext
  377. * @param overlap flag indicating that overlap filtering is used
  378. * @param pq integer part of picture quantizer
  379. * @param[in] n block index in the current MB
  380. * @param a_avail flag indicating top block availability
  381. * @param c_avail flag indicating left block availability
  382. * @param dc_val_ptr Pointer to DC predictor
  383. * @param dir_ptr Prediction direction for use in AC prediction
  384. */
  385. static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  386. int a_avail, int c_avail,
  387. int16_t **dc_val_ptr, int *dir_ptr)
  388. {
  389. int a, b, c, wrap, pred;
  390. int16_t *dc_val;
  391. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  392. int q1, q2 = 0;
  393. int dqscale_index;
  394. wrap = s->block_wrap[n];
  395. dc_val = s->dc_val[0] + s->block_index[n];
  396. /* B A
  397. * C X
  398. */
  399. c = dc_val[ - 1];
  400. b = dc_val[ - 1 - wrap];
  401. a = dc_val[ - wrap];
  402. /* scale predictors if needed */
  403. q1 = s->current_picture.qscale_table[mb_pos];
  404. dqscale_index = s->y_dc_scale_table[q1] - 1;
  405. if (dqscale_index < 0)
  406. return 0;
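/* The rescaling below converts a neighbour's stored DC predictor from its
 * quantizer q2 to the current q1: ff_vc1_dqscale[n - 1] is roughly 2^18 / n,
 * so multiplying by y_dc_scale_table[q2] and ff_vc1_dqscale[dqscale_index],
 * adding 0x20000 and shifting right by 18 is a rounded fixed-point multiply
 * by dc_scale(q2) / dc_scale(q1). */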
  407. if (c_avail && (n != 1 && n != 3)) {
  408. q2 = s->current_picture.qscale_table[mb_pos - 1];
  409. if (q2 && q2 != q1)
  410. c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  411. }
  412. if (a_avail && (n != 2 && n != 3)) {
  413. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  414. if (q2 && q2 != q1)
  415. a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  416. }
  417. if (a_avail && c_avail && (n != 3)) {
  418. int off = mb_pos;
  419. if (n != 1)
  420. off--;
  421. if (n != 2)
  422. off -= s->mb_stride;
  423. q2 = s->current_picture.qscale_table[off];
  424. if (q2 && q2 != q1)
  425. b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  426. }
  427. if (a_avail && c_avail) {
  428. if (abs(a - b) <= abs(b - c)) {
  429. pred = c;
  430. *dir_ptr = 1; // left
  431. } else {
  432. pred = a;
  433. *dir_ptr = 0; // top
  434. }
  435. } else if (a_avail) {
  436. pred = a;
  437. *dir_ptr = 0; // top
  438. } else if (c_avail) {
  439. pred = c;
  440. *dir_ptr = 1; // left
  441. } else {
  442. pred = 0;
  443. *dir_ptr = 1; // left
  444. }
  445. /* update predictor */
  446. *dc_val_ptr = &dc_val[0];
  447. return pred;
  448. }
  449. /** @} */ // Block group
  450. /**
  451. * @name VC1 Macroblock-level functions in Simple/Main Profiles
  452. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  453. * @{
  454. */
  455. static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
  456. uint8_t **coded_block_ptr)
  457. {
  458. int xy, wrap, pred, a, b, c;
  459. xy = s->block_index[n];
  460. wrap = s->b8_stride;
  461. /* B C
  462. * A X
  463. */
  464. a = s->coded_block[xy - 1 ];
  465. b = s->coded_block[xy - 1 - wrap];
  466. c = s->coded_block[xy - wrap];
  467. if (b == c) {
  468. pred = a;
  469. } else {
  470. pred = c;
  471. }
  472. /* store value */
  473. *coded_block_ptr = &s->coded_block[xy];
  474. return pred;
  475. }
  476. /**
  477. * Decode one AC coefficient
  478. * @param v The VC1 context
  479. * @param last Last coefficient
  480. * @param skip How many zero coefficients to skip
  481. * @param value Decoded AC coefficient value
  482. * @param codingset set of VLC to decode data
  483. * @see 8.1.3.4
  484. */
  485. static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
  486. int *value, int codingset)
  487. {
  488. GetBitContext *gb = &v->s.gb;
  489. int index, escape, run = 0, level = 0, lst = 0;
  490. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  491. if (index != ff_vc1_ac_sizes[codingset] - 1) {
  492. run = vc1_index_decode_table[codingset][index][0];
  493. level = vc1_index_decode_table[codingset][index][1];
  494. lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
  495. if (get_bits1(gb))
  496. level = -level;
  497. } else {
  498. escape = decode210(gb);
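/* Three escape modes: 0 and 1 re-read a run/level pair and add a
 * codingset-dependent level or run delta, while mode 2 codes run, sign and
 * level explicitly using the esc3 lengths set up below (tables 59/60). */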
  499. if (escape != 2) {
  500. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  501. run = vc1_index_decode_table[codingset][index][0];
  502. level = vc1_index_decode_table[codingset][index][1];
  503. lst = index >= vc1_last_decode_table[codingset];
  504. if (escape == 0) {
  505. if (lst)
  506. level += vc1_last_delta_level_table[codingset][run];
  507. else
  508. level += vc1_delta_level_table[codingset][run];
  509. } else {
  510. if (lst)
  511. run += vc1_last_delta_run_table[codingset][level] + 1;
  512. else
  513. run += vc1_delta_run_table[codingset][level] + 1;
  514. }
  515. if (get_bits1(gb))
  516. level = -level;
  517. } else {
  518. int sign;
  519. lst = get_bits1(gb);
  520. if (v->s.esc3_level_length == 0) {
  521. if (v->pq < 8 || v->dquantfrm) { // table 59
  522. v->s.esc3_level_length = get_bits(gb, 3);
  523. if (!v->s.esc3_level_length)
  524. v->s.esc3_level_length = get_bits(gb, 2) + 8;
  525. } else { // table 60
  526. v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
  527. }
  528. v->s.esc3_run_length = 3 + get_bits(gb, 2);
  529. }
  530. run = get_bits(gb, v->s.esc3_run_length);
  531. sign = get_bits1(gb);
  532. level = get_bits(gb, v->s.esc3_level_length);
  533. if (sign)
  534. level = -level;
  535. }
  536. }
  537. *last = lst;
  538. *skip = run;
  539. *value = level;
  540. }
  541. /** Decode intra block in intra frames - should be faster than decode_intra_block
  542. * @param v VC1Context
  543. * @param block block to decode
  544. * @param[in] n subblock index
  545. * @param coded whether AC coeffs are present
  546. * @param codingset set of VLC to decode data
  547. */
  548. static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
  549. int coded, int codingset)
  550. {
  551. GetBitContext *gb = &v->s.gb;
  552. MpegEncContext *s = &v->s;
  553. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  554. int i;
  555. int16_t *dc_val;
  556. int16_t *ac_val, *ac_val2;
  557. int dcdiff;
  558. /* Get DC differential */
  559. if (n < 4) {
  560. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  561. } else {
  562. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  563. }
  564. if (dcdiff < 0) {
  565. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  566. return -1;
  567. }
  568. if (dcdiff) {
  569. if (dcdiff == 119 /* ESC index value */) {
  570. /* TODO: Optimize */
  571. if (v->pq == 1) dcdiff = get_bits(gb, 10);
  572. else if (v->pq == 2) dcdiff = get_bits(gb, 9);
  573. else dcdiff = get_bits(gb, 8);
  574. } else {
  575. if (v->pq == 1)
  576. dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
  577. else if (v->pq == 2)
  578. dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
  579. }
  580. if (get_bits1(gb))
  581. dcdiff = -dcdiff;
  582. }
  583. /* Prediction */
  584. dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
  585. *dc_val = dcdiff;
  586. /* Store the quantized DC coeff, used for prediction */
  587. if (n < 4) {
  588. block[0] = dcdiff * s->y_dc_scale;
  589. } else {
  590. block[0] = dcdiff * s->c_dc_scale;
  591. }
  592. /* Skip ? */
  593. if (!coded) {
  594. goto not_coded;
  595. }
  596. // AC Decoding
  597. i = 1;
  598. {
  599. int last = 0, skip, value;
  600. const uint8_t *zz_table;
  601. int scale;
  602. int k;
  603. scale = v->pq * 2 + v->halfpq;
  604. if (v->s.ac_pred) {
  605. if (!dc_pred_dir)
  606. zz_table = v->zz_8x8[2];
  607. else
  608. zz_table = v->zz_8x8[3];
  609. } else
  610. zz_table = v->zz_8x8[1];
  611. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  612. ac_val2 = ac_val;
  613. if (dc_pred_dir) // left
  614. ac_val -= 16;
  615. else // top
  616. ac_val -= 16 * s->block_wrap[n];
  617. while (!last) {
  618. vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  619. i += skip;
  620. if (i > 63)
  621. break;
  622. block[zz_table[i++]] = value;
  623. }
  624. /* apply AC prediction if needed */
  625. if (s->ac_pred) {
  626. if (dc_pred_dir) { // left
  627. for (k = 1; k < 8; k++)
  628. block[k << v->left_blk_sh] += ac_val[k];
  629. } else { // top
  630. for (k = 1; k < 8; k++)
  631. block[k << v->top_blk_sh] += ac_val[k + 8];
  632. }
  633. }
  634. /* save AC coeffs for further prediction */
  635. for (k = 1; k < 8; k++) {
  636. ac_val2[k] = block[k << v->left_blk_sh];
  637. ac_val2[k + 8] = block[k << v->top_blk_sh];
  638. }
  639. /* scale AC coeffs */
  640. for (k = 1; k < 64; k++)
  641. if (block[k]) {
  642. block[k] *= scale;
  643. if (!v->pquantizer)
  644. block[k] += (block[k] < 0) ? -v->pq : v->pq;
  645. }
  646. if (s->ac_pred) i = 63;
  647. }
  648. not_coded:
  649. if (!coded) {
  650. int k, scale;
  651. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  652. ac_val2 = ac_val;
  653. i = 0;
  654. scale = v->pq * 2 + v->halfpq;
  655. memset(ac_val2, 0, 16 * 2);
  656. if (dc_pred_dir) { // left
  657. ac_val -= 16;
  658. if (s->ac_pred)
  659. memcpy(ac_val2, ac_val, 8 * 2);
  660. } else { // top
  661. ac_val -= 16 * s->block_wrap[n];
  662. if (s->ac_pred)
  663. memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
  664. }
  665. /* apply AC prediction if needed */
  666. if (s->ac_pred) {
  667. if (dc_pred_dir) { //left
  668. for (k = 1; k < 8; k++) {
  669. block[k << v->left_blk_sh] = ac_val[k] * scale;
  670. if (!v->pquantizer && block[k << v->left_blk_sh])
  671. block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
  672. }
  673. } else { // top
  674. for (k = 1; k < 8; k++) {
  675. block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
  676. if (!v->pquantizer && block[k << v->top_blk_sh])
  677. block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
  678. }
  679. }
  680. i = 63;
  681. }
  682. }
  683. s->block_last_index[n] = i;
  684. return 0;
  685. }
  686. /** Decode intra block in intra frames - should be faster than decode_intra_block
  687. * @param v VC1Context
  688. * @param block block to decode
  689. * @param[in] n subblock number
  690. * @param coded whether AC coeffs are present
  691. * @param codingset set of VLC to decode data
  692. * @param mquant quantizer value for this macroblock
  693. */
  694. static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
  695. int coded, int codingset, int mquant)
  696. {
  697. GetBitContext *gb = &v->s.gb;
  698. MpegEncContext *s = &v->s;
  699. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  700. int i;
  701. int16_t *dc_val;
  702. int16_t *ac_val, *ac_val2;
  703. int dcdiff;
  704. int a_avail = v->a_avail, c_avail = v->c_avail;
  705. int use_pred = s->ac_pred;
  706. int scale;
  707. int q1, q2 = 0;
  708. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  709. /* Get DC differential */
  710. if (n < 4) {
  711. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  712. } else {
  713. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  714. }
  715. if (dcdiff < 0) {
  716. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  717. return -1;
  718. }
  719. if (dcdiff) {
  720. if (dcdiff == 119 /* ESC index value */) {
  721. /* TODO: Optimize */
  722. if (mquant == 1) dcdiff = get_bits(gb, 10);
  723. else if (mquant == 2) dcdiff = get_bits(gb, 9);
  724. else dcdiff = get_bits(gb, 8);
  725. } else {
  726. if (mquant == 1)
  727. dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
  728. else if (mquant == 2)
  729. dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
  730. }
  731. if (get_bits1(gb))
  732. dcdiff = -dcdiff;
  733. }
  734. /* Prediction */
  735. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
  736. *dc_val = dcdiff;
  737. /* Store the quantized DC coeff, used for prediction */
  738. if (n < 4) {
  739. block[0] = dcdiff * s->y_dc_scale;
  740. } else {
  741. block[0] = dcdiff * s->c_dc_scale;
  742. }
  743. //AC Decoding
  744. i = 1;
  745. /* check if AC is needed at all */
  746. if (!a_avail && !c_avail)
  747. use_pred = 0;
  748. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  749. ac_val2 = ac_val;
  750. scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
  751. if (dc_pred_dir) // left
  752. ac_val -= 16;
  753. else // top
  754. ac_val -= 16 * s->block_wrap[n];
  755. q1 = s->current_picture.qscale_table[mb_pos];
  756. if (dc_pred_dir && c_avail && mb_pos)
  757. q2 = s->current_picture.qscale_table[mb_pos - 1];
  758. if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
  759. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  760. if (dc_pred_dir && n == 1)
  761. q2 = q1;
  762. if (!dc_pred_dir && n == 2)
  763. q2 = q1;
  764. if (n == 3)
  765. q2 = q1;
  766. if (coded) {
  767. int last = 0, skip, value;
  768. const uint8_t *zz_table;
  769. int k;
  770. if (v->s.ac_pred) {
  771. if (!use_pred && v->fcm == ILACE_FRAME) {
  772. zz_table = v->zzi_8x8;
  773. } else {
  774. if (!dc_pred_dir) // top
  775. zz_table = v->zz_8x8[2];
  776. else // left
  777. zz_table = v->zz_8x8[3];
  778. }
  779. } else {
  780. if (v->fcm != ILACE_FRAME)
  781. zz_table = v->zz_8x8[1];
  782. else
  783. zz_table = v->zzi_8x8;
  784. }
  785. while (!last) {
  786. vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  787. i += skip;
  788. if (i > 63)
  789. break;
  790. block[zz_table[i++]] = value;
  791. }
  792. /* apply AC prediction if needed */
  793. if (use_pred) {
  794. /* scale predictors if needed*/
  795. if (q2 && q1 != q2) {
  796. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  797. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  798. if (q1 < 1)
  799. return AVERROR_INVALIDDATA;
  800. if (dc_pred_dir) { // left
  801. for (k = 1; k < 8; k++)
  802. block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  803. } else { // top
  804. for (k = 1; k < 8; k++)
  805. block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  806. }
  807. } else {
  808. if (dc_pred_dir) { //left
  809. for (k = 1; k < 8; k++)
  810. block[k << v->left_blk_sh] += ac_val[k];
  811. } else { //top
  812. for (k = 1; k < 8; k++)
  813. block[k << v->top_blk_sh] += ac_val[k + 8];
  814. }
  815. }
  816. }
  817. /* save AC coeffs for further prediction */
  818. for (k = 1; k < 8; k++) {
  819. ac_val2[k ] = block[k << v->left_blk_sh];
  820. ac_val2[k + 8] = block[k << v->top_blk_sh];
  821. }
  822. /* scale AC coeffs */
  823. for (k = 1; k < 64; k++)
  824. if (block[k]) {
  825. block[k] *= scale;
  826. if (!v->pquantizer)
  827. block[k] += (block[k] < 0) ? -mquant : mquant;
  828. }
  829. if (use_pred) i = 63;
  830. } else { // no AC coeffs
  831. int k;
  832. memset(ac_val2, 0, 16 * 2);
  833. if (dc_pred_dir) { // left
  834. if (use_pred) {
  835. memcpy(ac_val2, ac_val, 8 * 2);
  836. if (q2 && q1 != q2) {
  837. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  838. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  839. if (q1 < 1)
  840. return AVERROR_INVALIDDATA;
  841. for (k = 1; k < 8; k++)
  842. ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  843. }
  844. }
  845. } else { // top
  846. if (use_pred) {
  847. memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
  848. if (q2 && q1 != q2) {
  849. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  850. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  851. if (q1 < 1)
  852. return AVERROR_INVALIDDATA;
  853. for (k = 1; k < 8; k++)
  854. ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  855. }
  856. }
  857. }
  858. /* apply AC prediction if needed */
  859. if (use_pred) {
  860. if (dc_pred_dir) { // left
  861. for (k = 1; k < 8; k++) {
  862. block[k << v->left_blk_sh] = ac_val2[k] * scale;
  863. if (!v->pquantizer && block[k << v->left_blk_sh])
  864. block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
  865. }
  866. } else { // top
  867. for (k = 1; k < 8; k++) {
  868. block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
  869. if (!v->pquantizer && block[k << v->top_blk_sh])
  870. block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
  871. }
  872. }
  873. i = 63;
  874. }
  875. }
  876. s->block_last_index[n] = i;
  877. return 0;
  878. }
  879. /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
  880. * @param v VC1Context
  881. * @param block block to decode
  882. * @param[in] n subblock index
  883. * @param coded whether AC coeffs are present
  884. * @param mquant block quantizer
  885. * @param codingset set of VLC to decode data
  886. */
  887. static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
  888. int coded, int mquant, int codingset)
  889. {
  890. GetBitContext *gb = &v->s.gb;
  891. MpegEncContext *s = &v->s;
  892. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  893. int i;
  894. int16_t *dc_val;
  895. int16_t *ac_val, *ac_val2;
  896. int dcdiff;
  897. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  898. int a_avail = v->a_avail, c_avail = v->c_avail;
  899. int use_pred = s->ac_pred;
  900. int scale;
  901. int q1, q2 = 0;
  902. s->bdsp.clear_block(block);
  903. /* XXX: Guard against dumb values of mquant */
  904. mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
  905. /* Set DC scale - y and c use the same */
  906. s->y_dc_scale = s->y_dc_scale_table[mquant];
  907. s->c_dc_scale = s->c_dc_scale_table[mquant];
  908. /* Get DC differential */
  909. if (n < 4) {
  910. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  911. } else {
  912. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  913. }
  914. if (dcdiff < 0) {
  915. av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
  916. return -1;
  917. }
  918. if (dcdiff) {
  919. if (dcdiff == 119 /* ESC index value */) {
  920. /* TODO: Optimize */
  921. if (mquant == 1) dcdiff = get_bits(gb, 10);
  922. else if (mquant == 2) dcdiff = get_bits(gb, 9);
  923. else dcdiff = get_bits(gb, 8);
  924. } else {
  925. if (mquant == 1)
  926. dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
  927. else if (mquant == 2)
  928. dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
  929. }
  930. if (get_bits1(gb))
  931. dcdiff = -dcdiff;
  932. }
  933. /* Prediction */
  934. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
  935. *dc_val = dcdiff;
  936. /* Store the quantized DC coeff, used for prediction */
  937. if (n < 4) {
  938. block[0] = dcdiff * s->y_dc_scale;
  939. } else {
  940. block[0] = dcdiff * s->c_dc_scale;
  941. }
  942. //AC Decoding
  943. i = 1;
  944. /* check if AC is needed at all and adjust direction if needed */
  945. if (!a_avail) dc_pred_dir = 1;
  946. if (!c_avail) dc_pred_dir = 0;
  947. if (!a_avail && !c_avail) use_pred = 0;
  948. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  949. ac_val2 = ac_val;
  950. scale = mquant * 2 + v->halfpq;
  951. if (dc_pred_dir) //left
  952. ac_val -= 16;
  953. else //top
  954. ac_val -= 16 * s->block_wrap[n];
  955. q1 = s->current_picture.qscale_table[mb_pos];
  956. if (dc_pred_dir && c_avail && mb_pos)
  957. q2 = s->current_picture.qscale_table[mb_pos - 1];
  958. if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
  959. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  960. if (dc_pred_dir && n == 1)
  961. q2 = q1;
  962. if (!dc_pred_dir && n == 2)
  963. q2 = q1;
  964. if (n == 3) q2 = q1;
  965. if (coded) {
  966. int last = 0, skip, value;
  967. int k;
  968. while (!last) {
  969. vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  970. i += skip;
  971. if (i > 63)
  972. break;
  973. if (v->fcm == PROGRESSIVE)
  974. block[v->zz_8x8[0][i++]] = value;
  975. else {
  976. if (use_pred && (v->fcm == ILACE_FRAME)) {
  977. if (!dc_pred_dir) // top
  978. block[v->zz_8x8[2][i++]] = value;
  979. else // left
  980. block[v->zz_8x8[3][i++]] = value;
  981. } else {
  982. block[v->zzi_8x8[i++]] = value;
  983. }
  984. }
  985. }
  986. /* apply AC prediction if needed */
  987. if (use_pred) {
  988. /* scale predictors if needed*/
  989. if (q2 && q1 != q2) {
  990. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  991. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  992. if (q1 < 1)
  993. return AVERROR_INVALIDDATA;
  994. if (dc_pred_dir) { // left
  995. for (k = 1; k < 8; k++)
  996. block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  997. } else { //top
  998. for (k = 1; k < 8; k++)
  999. block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  1000. }
  1001. } else {
  1002. if (dc_pred_dir) { // left
  1003. for (k = 1; k < 8; k++)
  1004. block[k << v->left_blk_sh] += ac_val[k];
  1005. } else { // top
  1006. for (k = 1; k < 8; k++)
  1007. block[k << v->top_blk_sh] += ac_val[k + 8];
  1008. }
  1009. }
  1010. }
  1011. /* save AC coeffs for further prediction */
  1012. for (k = 1; k < 8; k++) {
  1013. ac_val2[k ] = block[k << v->left_blk_sh];
  1014. ac_val2[k + 8] = block[k << v->top_blk_sh];
  1015. }
  1016. /* scale AC coeffs */
  1017. for (k = 1; k < 64; k++)
  1018. if (block[k]) {
  1019. block[k] *= scale;
  1020. if (!v->pquantizer)
  1021. block[k] += (block[k] < 0) ? -mquant : mquant;
  1022. }
  1023. if (use_pred) i = 63;
  1024. } else { // no AC coeffs
  1025. int k;
  1026. memset(ac_val2, 0, 16 * 2);
  1027. if (dc_pred_dir) { // left
  1028. if (use_pred) {
  1029. memcpy(ac_val2, ac_val, 8 * 2);
  1030. if (q2 && q1 != q2) {
  1031. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  1032. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  1033. if (q1 < 1)
  1034. return AVERROR_INVALIDDATA;
  1035. for (k = 1; k < 8; k++)
  1036. ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  1037. }
  1038. }
  1039. } else { // top
  1040. if (use_pred) {
  1041. memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
  1042. if (q2 && q1 != q2) {
  1043. q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
  1044. q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
  1045. if (q1 < 1)
  1046. return AVERROR_INVALIDDATA;
  1047. for (k = 1; k < 8; k++)
  1048. ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  1049. }
  1050. }
  1051. }
  1052. /* apply AC prediction if needed */
  1053. if (use_pred) {
  1054. if (dc_pred_dir) { // left
  1055. for (k = 1; k < 8; k++) {
  1056. block[k << v->left_blk_sh] = ac_val2[k] * scale;
  1057. if (!v->pquantizer && block[k << v->left_blk_sh])
  1058. block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
  1059. }
  1060. } else { // top
  1061. for (k = 1; k < 8; k++) {
  1062. block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
  1063. if (!v->pquantizer && block[k << v->top_blk_sh])
  1064. block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
  1065. }
  1066. }
  1067. i = 63;
  1068. }
  1069. }
  1070. s->block_last_index[n] = i;
  1071. return 0;
  1072. }
  1073. /** Decode P block
  1074. */
  1075. static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
  1076. int mquant, int ttmb, int first_block,
  1077. uint8_t *dst, int linesize, int skip_block,
  1078. int *ttmb_out)
  1079. {
  1080. MpegEncContext *s = &v->s;
  1081. GetBitContext *gb = &s->gb;
  1082. int i, j;
  1083. int subblkpat = 0;
  1084. int scale, off, idx, last, skip, value;
  1085. int ttblk = ttmb & 7;
  1086. int pat = 0;
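/* pat accumulates a 4-bit coded-subblock pattern for this block; callers OR
 * the return value into v->cbp, which the loop-filter code later consults. */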
  1087. s->bdsp.clear_block(block);
  1088. if (ttmb == -1) {
  1089. ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
  1090. }
  1091. if (ttblk == TT_4X4) {
  1092. subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
  1093. }
  1094. if ((ttblk != TT_8X8 && ttblk != TT_4X4)
  1095. && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
  1096. || (!v->res_rtm_flag && !first_block))) {
  1097. subblkpat = decode012(gb);
  1098. if (subblkpat)
  1099. subblkpat ^= 3; // swap decoded pattern bits
  1100. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
  1101. ttblk = TT_8X4;
  1102. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
  1103. ttblk = TT_4X8;
  1104. }
  1105. scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
  1106. // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
  1107. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
  1108. subblkpat = 2 - (ttblk == TT_8X4_TOP);
  1109. ttblk = TT_8X4;
  1110. }
  1111. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
  1112. subblkpat = 2 - (ttblk == TT_4X8_LEFT);
  1113. ttblk = TT_4X8;
  1114. }
  1115. switch (ttblk) {
  1116. case TT_8X8:
  1117. pat = 0xF;
  1118. i = 0;
  1119. last = 0;
  1120. while (!last) {
  1121. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1122. i += skip;
  1123. if (i > 63)
  1124. break;
  1125. if (!v->fcm)
  1126. idx = v->zz_8x8[0][i++];
  1127. else
  1128. idx = v->zzi_8x8[i++];
  1129. block[idx] = value * scale;
  1130. if (!v->pquantizer)
  1131. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1132. }
  1133. if (!skip_block) {
  1134. if (i == 1)
  1135. v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
  1136. else {
  1137. v->vc1dsp.vc1_inv_trans_8x8(block);
  1138. s->idsp.add_pixels_clamped(block, dst, linesize);
  1139. }
  1140. }
  1141. break;
  1142. case TT_4X4:
  1143. pat = ~subblkpat & 0xF;
  1144. for (j = 0; j < 4; j++) {
  1145. last = subblkpat & (1 << (3 - j));
  1146. i = 0;
  1147. off = (j & 1) * 4 + (j & 2) * 16;
  1148. while (!last) {
  1149. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1150. i += skip;
  1151. if (i > 15)
  1152. break;
  1153. if (!v->fcm)
  1154. idx = ff_vc1_simple_progressive_4x4_zz[i++];
  1155. else
  1156. idx = ff_vc1_adv_interlaced_4x4_zz[i++];
  1157. block[idx + off] = value * scale;
  1158. if (!v->pquantizer)
  1159. block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
  1160. }
  1161. if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
  1162. if (i == 1)
  1163. v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1164. else
  1165. v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1166. }
  1167. }
  1168. break;
  1169. case TT_8X4:
  1170. pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
  1171. for (j = 0; j < 2; j++) {
  1172. last = subblkpat & (1 << (1 - j));
  1173. i = 0;
  1174. off = j * 32;
  1175. while (!last) {
  1176. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1177. i += skip;
  1178. if (i > 31)
  1179. break;
  1180. if (!v->fcm)
  1181. idx = v->zz_8x4[i++] + off;
  1182. else
  1183. idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
  1184. block[idx] = value * scale;
  1185. if (!v->pquantizer)
  1186. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1187. }
  1188. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1189. if (i == 1)
  1190. v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
  1191. else
  1192. v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
  1193. }
  1194. }
  1195. break;
  1196. case TT_4X8:
  1197. pat = ~(subblkpat * 5) & 0xF;
  1198. for (j = 0; j < 2; j++) {
  1199. last = subblkpat & (1 << (1 - j));
  1200. i = 0;
  1201. off = j * 4;
  1202. while (!last) {
  1203. vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1204. i += skip;
  1205. if (i > 31)
  1206. break;
  1207. if (!v->fcm)
  1208. idx = v->zz_4x8[i++] + off;
  1209. else
  1210. idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
  1211. block[idx] = value * scale;
  1212. if (!v->pquantizer)
  1213. block[idx] += (block[idx] < 0) ? -mquant : mquant;
  1214. }
  1215. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1216. if (i == 1)
  1217. v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
  1218. else
  1219. v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
  1220. }
  1221. }
  1222. break;
  1223. }
  1224. if (ttmb_out)
  1225. *ttmb_out |= ttblk << (n * 4);
  1226. return pat;
  1227. }
  1228. /** @} */ // Macroblock group
  1229. static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
  1230. static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
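/* Magnitude-size and offset tables used by GET_MVDATA() above for
 * progressive MV differentials (index1 = 0..5). */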
  1231. /** Decode one P-frame MB
  1232. */
  1233. static int vc1_decode_p_mb(VC1Context *v)
  1234. {
  1235. MpegEncContext *s = &v->s;
  1236. GetBitContext *gb = &s->gb;
  1237. int i, j;
  1238. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1239. int cbp; /* cbp decoding stuff */
  1240. int mqdiff, mquant; /* MB quantization */
  1241. int ttmb = v->ttfrm; /* MB Transform type */
  1242. int mb_has_coeffs = 1; /* last_flag */
  1243. int dmv_x, dmv_y; /* Differential MV components */
  1244. int index, index1; /* LUT indexes */
  1245. int val, sign; /* temp values */
  1246. int first_block = 1;
  1247. int dst_idx, off;
  1248. int skipped, fourmv;
  1249. int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
  1250. mquant = v->pq; /* lossy initialization */
  1251. if (v->mv_type_is_raw)
  1252. fourmv = get_bits1(gb);
  1253. else
  1254. fourmv = v->mv_type_mb_plane[mb_pos];
  1255. if (v->skip_is_raw)
  1256. skipped = get_bits1(gb);
  1257. else
  1258. skipped = v->s.mbskip_table[mb_pos];
  1259. if (!fourmv) { /* 1MV mode */
  1260. if (!skipped) {
  1261. GET_MVDATA(dmv_x, dmv_y);
  1262. if (s->mb_intra) {
  1263. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1264. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1265. }
  1266. s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
  1267. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1268. /* FIXME Set DC val for inter block ? */
  1269. if (s->mb_intra && !mb_has_coeffs) {
  1270. GET_MQUANT();
  1271. s->ac_pred = get_bits1(gb);
  1272. cbp = 0;
  1273. } else if (mb_has_coeffs) {
  1274. if (s->mb_intra)
  1275. s->ac_pred = get_bits1(gb);
  1276. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1277. GET_MQUANT();
  1278. } else {
  1279. mquant = v->pq;
  1280. cbp = 0;
  1281. }
  1282. s->current_picture.qscale_table[mb_pos] = mquant;
  1283. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1284. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
  1285. VC1_TTMB_VLC_BITS, 2);
  1286. if (!s->mb_intra) ff_vc1_mc_1mv(v, 0);
  1287. dst_idx = 0;
  1288. for (i = 0; i < 6; i++) {
  1289. s->dc_val[0][s->block_index[i]] = 0;
  1290. dst_idx += i >> 2;
  1291. val = ((cbp >> (5 - i)) & 1);
  1292. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1293. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1294. if (s->mb_intra) {
  1295. /* check if prediction blocks A and C are available */
  1296. v->a_avail = v->c_avail = 0;
  1297. if (i == 2 || i == 3 || !s->first_slice_line)
  1298. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1299. if (i == 1 || i == 3 || s->mb_x)
  1300. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1301. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1302. (i & 4) ? v->codingset2 : v->codingset);
  1303. if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1304. continue;
  1305. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1306. if (v->rangeredfrm)
  1307. for (j = 0; j < 64; j++)
  1308. s->block[i][j] <<= 1;
  1309. s->idsp.put_signed_pixels_clamped(s->block[i],
  1310. s->dest[dst_idx] + off,
  1311. i & 4 ? s->uvlinesize
  1312. : s->linesize);
  1313. if (v->pq >= 9 && v->overlap) {
  1314. if (v->c_avail)
  1315. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1316. if (v->a_avail)
  1317. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1318. }
  1319. block_cbp |= 0xF << (i << 2);
  1320. block_intra |= 1 << i;
  1321. } else if (val) {
  1322. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
  1323. s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
  1324. (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1325. block_cbp |= pat << (i << 2);
  1326. if (!v->ttmbf && ttmb < 8)
  1327. ttmb = -1;
  1328. first_block = 0;
  1329. }
  1330. }
  1331. } else { // skipped
  1332. s->mb_intra = 0;
  1333. for (i = 0; i < 6; i++) {
  1334. v->mb_type[0][s->block_index[i]] = 0;
  1335. s->dc_val[0][s->block_index[i]] = 0;
  1336. }
  1337. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1338. s->current_picture.qscale_table[mb_pos] = 0;
  1339. ff_vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1340. ff_vc1_mc_1mv(v, 0);
  1341. }
  1342. } else { // 4MV mode
  1343. if (!skipped /* unskipped MB */) {
  1344. int intra_count = 0, coded_inter = 0;
  1345. int is_intra[6], is_coded[6];
  1346. /* Get CBPCY */
  1347. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1348. for (i = 0; i < 6; i++) {
  1349. val = ((cbp >> (5 - i)) & 1);
  1350. s->dc_val[0][s->block_index[i]] = 0;
  1351. s->mb_intra = 0;
  1352. if (i < 4) {
  1353. dmv_x = dmv_y = 0;
  1354. s->mb_intra = 0;
  1355. mb_has_coeffs = 0;
  1356. if (val) {
  1357. GET_MVDATA(dmv_x, dmv_y);
  1358. }
  1359. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1360. if (!s->mb_intra)
  1361. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1362. intra_count += s->mb_intra;
  1363. is_intra[i] = s->mb_intra;
  1364. is_coded[i] = mb_has_coeffs;
  1365. }
  1366. if (i & 4) {
  1367. is_intra[i] = (intra_count >= 3);
  1368. is_coded[i] = val;
  1369. }
  1370. if (i == 4)
  1371. ff_vc1_mc_4mv_chroma(v, 0);
  1372. v->mb_type[0][s->block_index[i]] = is_intra[i];
  1373. if (!coded_inter)
  1374. coded_inter = !is_intra[i] & is_coded[i];
  1375. }
  1376. // if there are no coded blocks then don't do anything more
  1377. dst_idx = 0;
  1378. if (!intra_count && !coded_inter)
  1379. goto end;
  1380. GET_MQUANT();
  1381. s->current_picture.qscale_table[mb_pos] = mquant;
  1382. /* test if block is intra and has pred */
  1383. {
  1384. int intrapred = 0;
  1385. for (i = 0; i < 6; i++)
  1386. if (is_intra[i]) {
  1387. if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
  1388. || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
  1389. intrapred = 1;
  1390. break;
  1391. }
  1392. }
  1393. if (intrapred)
  1394. s->ac_pred = get_bits1(gb);
  1395. else
  1396. s->ac_pred = 0;
  1397. }
  1398. if (!v->ttmbf && coded_inter)
  1399. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1400. for (i = 0; i < 6; i++) {
  1401. dst_idx += i >> 2;
  1402. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1403. s->mb_intra = is_intra[i];
  1404. if (is_intra[i]) {
  1405. /* check if prediction blocks A and C are available */
  1406. v->a_avail = v->c_avail = 0;
  1407. if (i == 2 || i == 3 || !s->first_slice_line)
  1408. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1409. if (i == 1 || i == 3 || s->mb_x)
  1410. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1411. vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
  1412. (i & 4) ? v->codingset2 : v->codingset);
  1413. if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1414. continue;
  1415. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1416. if (v->rangeredfrm)
  1417. for (j = 0; j < 64; j++)
  1418. s->block[i][j] <<= 1;
  1419. s->idsp.put_signed_pixels_clamped(s->block[i],
  1420. s->dest[dst_idx] + off,
  1421. (i & 4) ? s->uvlinesize
  1422. : s->linesize);
  1423. if (v->pq >= 9 && v->overlap) {
  1424. if (v->c_avail)
  1425. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1426. if (v->a_avail)
  1427. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  1428. }
  1429. block_cbp |= 0xF << (i << 2);
  1430. block_intra |= 1 << i;
  1431. } else if (is_coded[i]) {
  1432. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1433. first_block, s->dest[dst_idx] + off,
  1434. (i & 4) ? s->uvlinesize : s->linesize,
  1435. (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1436. &block_tt);
  1437. block_cbp |= pat << (i << 2);
  1438. if (!v->ttmbf && ttmb < 8)
  1439. ttmb = -1;
  1440. first_block = 0;
  1441. }
  1442. }
  1443. } else { // skipped MB
  1444. s->mb_intra = 0;
  1445. s->current_picture.qscale_table[mb_pos] = 0;
  1446. for (i = 0; i < 6; i++) {
  1447. v->mb_type[0][s->block_index[i]] = 0;
  1448. s->dc_val[0][s->block_index[i]] = 0;
  1449. }
  1450. for (i = 0; i < 4; i++) {
  1451. ff_vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1452. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1453. }
  1454. ff_vc1_mc_4mv_chroma(v, 0);
  1455. s->current_picture.qscale_table[mb_pos] = 0;
  1456. }
  1457. }
  1458. end:
  1459. v->cbp[s->mb_x] = block_cbp;
  1460. v->ttblk[s->mb_x] = block_tt;
  1461. v->is_intra[s->mb_x] = block_intra;
  1462. return 0;
  1463. }
  1464. /* Decode one macroblock in an interlaced frame p picture */
  1465. static int vc1_decode_p_mb_intfr(VC1Context *v)
  1466. {
  1467. MpegEncContext *s = &v->s;
  1468. GetBitContext *gb = &s->gb;
  1469. int i;
  1470. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1471. int cbp = 0; /* cbp decoding stuff */
  1472. int mqdiff, mquant; /* MB quantization */
  1473. int ttmb = v->ttfrm; /* MB Transform type */
  1474. int mb_has_coeffs = 1; /* last_flag */
  1475. int dmv_x, dmv_y; /* Differential MV components */
  1476. int val; /* temp value */
  1477. int first_block = 1;
  1478. int dst_idx, off;
  1479. int skipped, fourmv = 0, twomv = 0;
  1480. int block_cbp = 0, pat, block_tt = 0;
  1481. int idx_mbmode = 0, mvbp;
  1482. int stride_y, fieldtx;
  1483. mquant = v->pq; /* Lossy initialization */
  1484. if (v->skip_is_raw)
  1485. skipped = get_bits1(gb);
  1486. else
  1487. skipped = v->s.mbskip_table[mb_pos];
  1488. if (!skipped) {
  1489. if (v->fourmvswitch)
  1490. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
  1491. else
  1492. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
  1493. switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
  1494. /* store the motion vector type in a flag (useful later) */
  1495. case MV_PMODE_INTFR_4MV:
  1496. fourmv = 1;
  1497. v->blk_mv_type[s->block_index[0]] = 0;
  1498. v->blk_mv_type[s->block_index[1]] = 0;
  1499. v->blk_mv_type[s->block_index[2]] = 0;
  1500. v->blk_mv_type[s->block_index[3]] = 0;
  1501. break;
  1502. case MV_PMODE_INTFR_4MV_FIELD:
  1503. fourmv = 1;
  1504. v->blk_mv_type[s->block_index[0]] = 1;
  1505. v->blk_mv_type[s->block_index[1]] = 1;
  1506. v->blk_mv_type[s->block_index[2]] = 1;
  1507. v->blk_mv_type[s->block_index[3]] = 1;
  1508. break;
  1509. case MV_PMODE_INTFR_2MV_FIELD:
  1510. twomv = 1;
  1511. v->blk_mv_type[s->block_index[0]] = 1;
  1512. v->blk_mv_type[s->block_index[1]] = 1;
  1513. v->blk_mv_type[s->block_index[2]] = 1;
  1514. v->blk_mv_type[s->block_index[3]] = 1;
  1515. break;
  1516. case MV_PMODE_INTFR_1MV:
  1517. v->blk_mv_type[s->block_index[0]] = 0;
  1518. v->blk_mv_type[s->block_index[1]] = 0;
  1519. v->blk_mv_type[s->block_index[2]] = 0;
  1520. v->blk_mv_type[s->block_index[3]] = 0;
  1521. break;
  1522. }
  1523. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  1524. for (i = 0; i < 4; i++) {
  1525. s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  1526. s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  1527. }
  1528. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1.
  1529. s->mb_intra = 1;
  1530. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  1531. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  1532. mb_has_coeffs = get_bits1(gb);
  1533. if (mb_has_coeffs)
  1534. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1535. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1536. GET_MQUANT();
  1537. s->current_picture.qscale_table[mb_pos] = mquant;
  1538. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1539. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1540. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1541. dst_idx = 0;
  1542. for (i = 0; i < 6; i++) {
  1543. v->a_avail = v->c_avail = 0;
  1544. v->mb_type[0][s->block_index[i]] = 1;
  1545. s->dc_val[0][s->block_index[i]] = 0;
  1546. dst_idx += i >> 2;
  1547. val = ((cbp >> (5 - i)) & 1);
  1548. if (i == 2 || i == 3 || !s->first_slice_line)
  1549. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1550. if (i == 1 || i == 3 || s->mb_x)
  1551. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1552. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1553. (i & 4) ? v->codingset2 : v->codingset);
  1554. if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1555. continue;
  1556. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1557. if (i < 4) {
  1558. stride_y = s->linesize << fieldtx;
  1559. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  1560. } else {
  1561. stride_y = s->uvlinesize;
  1562. off = 0;
  1563. }
  1564. s->idsp.put_signed_pixels_clamped(s->block[i],
  1565. s->dest[dst_idx] + off,
  1566. stride_y);
  1567. //TODO: loop filter
  1568. }
  1569. } else { // inter MB
  1570. mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
  1571. if (mb_has_coeffs)
  1572. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1573. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  1574. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  1575. } else {
  1576. if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
  1577. || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
  1578. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1579. }
  1580. }
  1581. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1582. for (i = 0; i < 6; i++)
  1583. v->mb_type[0][s->block_index[i]] = 0;
  1584. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
  1585. /* for all motion vector read MVDATA and motion compensate each block */
  1586. dst_idx = 0;
  1587. if (fourmv) {
  1588. mvbp = v->fourmvbp;
  1589. for (i = 0; i < 6; i++) {
  1590. if (i < 4) {
  1591. dmv_x = dmv_y = 0;
  1592. val = ((mvbp >> (3 - i)) & 1);
  1593. if (val) {
  1594. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1595. }
  1596. ff_vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
  1597. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1598. } else if (i == 4) {
  1599. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1600. }
  1601. }
  1602. } else if (twomv) {
  1603. mvbp = v->twomvbp;
  1604. dmv_x = dmv_y = 0;
  1605. if (mvbp & 2) {
  1606. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1607. }
  1608. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1609. ff_vc1_mc_4mv_luma(v, 0, 0, 0);
  1610. ff_vc1_mc_4mv_luma(v, 1, 0, 0);
  1611. dmv_x = dmv_y = 0;
  1612. if (mvbp & 1) {
  1613. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1614. }
  1615. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1616. ff_vc1_mc_4mv_luma(v, 2, 0, 0);
  1617. ff_vc1_mc_4mv_luma(v, 3, 0, 0);
  1618. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1619. } else {
  1620. mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
  1621. dmv_x = dmv_y = 0;
  1622. if (mvbp) {
  1623. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1624. }
  1625. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1626. ff_vc1_mc_1mv(v, 0);
  1627. }
  1628. if (cbp)
  1629. GET_MQUANT(); // p. 227
  1630. s->current_picture.qscale_table[mb_pos] = mquant;
  1631. if (!v->ttmbf && cbp)
  1632. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1633. for (i = 0; i < 6; i++) {
  1634. s->dc_val[0][s->block_index[i]] = 0;
  1635. dst_idx += i >> 2;
  1636. val = ((cbp >> (5 - i)) & 1);
  1637. if (!fieldtx)
  1638. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1639. else
  1640. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  1641. if (val) {
  1642. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1643. first_block, s->dest[dst_idx] + off,
  1644. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  1645. (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1646. block_cbp |= pat << (i << 2);
  1647. if (!v->ttmbf && ttmb < 8)
  1648. ttmb = -1;
  1649. first_block = 0;
  1650. }
  1651. }
  1652. }
  1653. } else { // skipped
  1654. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1655. for (i = 0; i < 6; i++) {
  1656. v->mb_type[0][s->block_index[i]] = 0;
  1657. s->dc_val[0][s->block_index[i]] = 0;
  1658. }
  1659. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1660. s->current_picture.qscale_table[mb_pos] = 0;
  1661. v->blk_mv_type[s->block_index[0]] = 0;
  1662. v->blk_mv_type[s->block_index[1]] = 0;
  1663. v->blk_mv_type[s->block_index[2]] = 0;
  1664. v->blk_mv_type[s->block_index[3]] = 0;
  1665. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1666. ff_vc1_mc_1mv(v, 0);
  1667. }
  1668. if (s->mb_x == s->mb_width - 1)
  1669. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
  1670. return 0;
  1671. }
  1672. static int vc1_decode_p_mb_intfi(VC1Context *v)
  1673. {
  1674. MpegEncContext *s = &v->s;
  1675. GetBitContext *gb = &s->gb;
  1676. int i;
  1677. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1678. int cbp = 0; /* cbp decoding stuff */
  1679. int mqdiff, mquant; /* MB quantization */
  1680. int ttmb = v->ttfrm; /* MB Transform type */
  1681. int mb_has_coeffs = 1; /* last_flag */
  1682. int dmv_x, dmv_y; /* Differential MV components */
  1683. int val; /* temp values */
  1684. int first_block = 1;
  1685. int dst_idx, off;
  1686. int pred_flag;
  1687. int block_cbp = 0, pat, block_tt = 0;
  1688. int idx_mbmode = 0;
  1689. mquant = v->pq; /* Lossy initialization */
  1690. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
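/* For interlaced field P pictures the MBMODE index selects the macroblock
* type: 0-1 intra, 2-5 single-MV inter, and the remaining values 4-MV inter;
* its low bits additionally signal whether an MV differential and residual
* coefficients are present, as decoded in the branches below. */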
  1691. if (idx_mbmode <= 1) { // intra MB
1692. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  1693. s->mb_intra = 1;
  1694. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  1695. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  1696. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1697. GET_MQUANT();
  1698. s->current_picture.qscale_table[mb_pos] = mquant;
  1699. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1700. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1701. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1702. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1703. mb_has_coeffs = idx_mbmode & 1;
  1704. if (mb_has_coeffs)
  1705. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1706. dst_idx = 0;
  1707. for (i = 0; i < 6; i++) {
  1708. v->a_avail = v->c_avail = 0;
  1709. v->mb_type[0][s->block_index[i]] = 1;
  1710. s->dc_val[0][s->block_index[i]] = 0;
  1711. dst_idx += i >> 2;
  1712. val = ((cbp >> (5 - i)) & 1);
  1713. if (i == 2 || i == 3 || !s->first_slice_line)
  1714. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1715. if (i == 1 || i == 3 || s->mb_x)
  1716. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1717. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1718. (i & 4) ? v->codingset2 : v->codingset);
  1719. if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1720. continue;
  1721. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1722. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1723. s->idsp.put_signed_pixels_clamped(s->block[i],
  1724. s->dest[dst_idx] + off,
  1725. (i & 4) ? s->uvlinesize
  1726. : s->linesize);
  1727. // TODO: loop filter
  1728. }
  1729. } else {
  1730. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1731. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1732. for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
  1733. if (idx_mbmode <= 5) { // 1-MV
  1734. dmv_x = dmv_y = pred_flag = 0;
  1735. if (idx_mbmode & 1) {
  1736. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1737. }
  1738. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1739. ff_vc1_mc_1mv(v, 0);
  1740. mb_has_coeffs = !(idx_mbmode & 2);
  1741. } else { // 4-MV
  1742. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1743. for (i = 0; i < 6; i++) {
  1744. if (i < 4) {
  1745. dmv_x = dmv_y = pred_flag = 0;
  1746. val = ((v->fourmvbp >> (3 - i)) & 1);
  1747. if (val) {
  1748. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1749. }
  1750. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1751. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1752. } else if (i == 4)
  1753. ff_vc1_mc_4mv_chroma(v, 0);
  1754. }
  1755. mb_has_coeffs = idx_mbmode & 1;
  1756. }
  1757. if (mb_has_coeffs)
  1758. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1759. if (cbp) {
  1760. GET_MQUANT();
  1761. }
  1762. s->current_picture.qscale_table[mb_pos] = mquant;
  1763. if (!v->ttmbf && cbp) {
  1764. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1765. }
  1766. dst_idx = 0;
  1767. for (i = 0; i < 6; i++) {
  1768. s->dc_val[0][s->block_index[i]] = 0;
  1769. dst_idx += i >> 2;
  1770. val = ((cbp >> (5 - i)) & 1);
  1771. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  1772. if (val) {
  1773. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1774. first_block, s->dest[dst_idx] + off,
  1775. (i & 4) ? s->uvlinesize : s->linesize,
  1776. (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1777. &block_tt);
  1778. block_cbp |= pat << (i << 2);
  1779. if (!v->ttmbf && ttmb < 8) ttmb = -1;
  1780. first_block = 0;
  1781. }
  1782. }
  1783. }
  1784. if (s->mb_x == s->mb_width - 1)
  1785. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  1786. return 0;
  1787. }
  1788. /** Decode one B-frame MB (in Main profile)
  1789. */
  1790. static void vc1_decode_b_mb(VC1Context *v)
  1791. {
  1792. MpegEncContext *s = &v->s;
  1793. GetBitContext *gb = &s->gb;
  1794. int i, j;
  1795. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1796. int cbp = 0; /* cbp decoding stuff */
  1797. int mqdiff, mquant; /* MB quantization */
  1798. int ttmb = v->ttfrm; /* MB Transform type */
  1799. int mb_has_coeffs = 0; /* last_flag */
  1800. int index, index1; /* LUT indexes */
  1801. int val, sign; /* temp values */
  1802. int first_block = 1;
  1803. int dst_idx, off;
  1804. int skipped, direct;
  1805. int dmv_x[2], dmv_y[2];
  1806. int bmvtype = BMV_TYPE_BACKWARD;
  1807. mquant = v->pq; /* lossy initialization */
  1808. s->mb_intra = 0;
  1809. if (v->dmb_is_raw)
  1810. direct = get_bits1(gb);
  1811. else
  1812. direct = v->direct_mb_plane[mb_pos];
  1813. if (v->skip_is_raw)
  1814. skipped = get_bits1(gb);
  1815. else
  1816. skipped = v->s.mbskip_table[mb_pos];
  1817. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1818. for (i = 0; i < 6; i++) {
  1819. v->mb_type[0][s->block_index[i]] = 0;
  1820. s->dc_val[0][s->block_index[i]] = 0;
  1821. }
  1822. s->current_picture.qscale_table[mb_pos] = 0;
  1823. if (!direct) {
  1824. if (!skipped) {
  1825. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1826. dmv_x[1] = dmv_x[0];
  1827. dmv_y[1] = dmv_y[0];
  1828. }
  1829. if (skipped || !s->mb_intra) {
  1830. bmvtype = decode012(gb);
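/* decode012() maps a 0/10/11 code to 0, 1 or 2.  Which of values 0 and 1
* means forward and which backward depends on BFRACTION: when the B picture
* lies in the second half of the anchor interval (bfraction >= 1/2) the
* single-bit code means backward prediction, otherwise forward - presumably
* so the shorter code goes to the nearer anchor.  Value 2 selects
* interpolated prediction. */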
  1831. switch (bmvtype) {
  1832. case 0:
  1833. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  1834. break;
  1835. case 1:
  1836. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  1837. break;
  1838. case 2:
  1839. bmvtype = BMV_TYPE_INTERPOLATED;
  1840. dmv_x[0] = dmv_y[0] = 0;
  1841. }
  1842. }
  1843. }
  1844. for (i = 0; i < 6; i++)
  1845. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1846. if (skipped) {
  1847. if (direct)
  1848. bmvtype = BMV_TYPE_INTERPOLATED;
  1849. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1850. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1851. return;
  1852. }
  1853. if (direct) {
  1854. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1855. GET_MQUANT();
  1856. s->mb_intra = 0;
  1857. s->current_picture.qscale_table[mb_pos] = mquant;
  1858. if (!v->ttmbf)
  1859. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1860. dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
  1861. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1862. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1863. } else {
  1864. if (!mb_has_coeffs && !s->mb_intra) {
  1865. /* no coded blocks - effectively skipped */
  1866. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1867. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1868. return;
  1869. }
  1870. if (s->mb_intra && !mb_has_coeffs) {
  1871. GET_MQUANT();
  1872. s->current_picture.qscale_table[mb_pos] = mquant;
  1873. s->ac_pred = get_bits1(gb);
  1874. cbp = 0;
  1875. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1876. } else {
  1877. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  1878. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1879. if (!mb_has_coeffs) {
  1880. /* interpolated skipped block */
  1881. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1882. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1883. return;
  1884. }
  1885. }
  1886. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1887. if (!s->mb_intra) {
  1888. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1889. }
  1890. if (s->mb_intra)
  1891. s->ac_pred = get_bits1(gb);
  1892. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1893. GET_MQUANT();
  1894. s->current_picture.qscale_table[mb_pos] = mquant;
  1895. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1896. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1897. }
  1898. }
  1899. dst_idx = 0;
  1900. for (i = 0; i < 6; i++) {
  1901. s->dc_val[0][s->block_index[i]] = 0;
  1902. dst_idx += i >> 2;
  1903. val = ((cbp >> (5 - i)) & 1);
  1904. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1905. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1906. if (s->mb_intra) {
  1907. /* check if prediction blocks A and C are available */
  1908. v->a_avail = v->c_avail = 0;
  1909. if (i == 2 || i == 3 || !s->first_slice_line)
  1910. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1911. if (i == 1 || i == 3 || s->mb_x)
  1912. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1913. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1914. (i & 4) ? v->codingset2 : v->codingset);
  1915. if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1916. continue;
  1917. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1918. if (v->rangeredfrm)
  1919. for (j = 0; j < 64; j++)
  1920. s->block[i][j] <<= 1;
  1921. s->idsp.put_signed_pixels_clamped(s->block[i],
  1922. s->dest[dst_idx] + off,
  1923. i & 4 ? s->uvlinesize
  1924. : s->linesize);
  1925. } else if (val) {
  1926. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1927. first_block, s->dest[dst_idx] + off,
  1928. (i & 4) ? s->uvlinesize : s->linesize,
  1929. (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
  1930. if (!v->ttmbf && ttmb < 8)
  1931. ttmb = -1;
  1932. first_block = 0;
  1933. }
  1934. }
  1935. }
  1936. /** Decode one B-frame MB (in interlaced field B picture)
  1937. */
  1938. static void vc1_decode_b_mb_intfi(VC1Context *v)
  1939. {
  1940. MpegEncContext *s = &v->s;
  1941. GetBitContext *gb = &s->gb;
  1942. int i, j;
  1943. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1944. int cbp = 0; /* cbp decoding stuff */
  1945. int mqdiff, mquant; /* MB quantization */
  1946. int ttmb = v->ttfrm; /* MB Transform type */
  1947. int mb_has_coeffs = 0; /* last_flag */
  1948. int val; /* temp value */
  1949. int first_block = 1;
  1950. int dst_idx, off;
  1951. int fwd;
  1952. int dmv_x[2], dmv_y[2], pred_flag[2];
  1953. int bmvtype = BMV_TYPE_BACKWARD;
  1954. int idx_mbmode, interpmvp;
  1955. mquant = v->pq; /* Lossy initialization */
  1956. s->mb_intra = 0;
  1957. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  1958. if (idx_mbmode <= 1) { // intra MB
1959. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  1960. s->mb_intra = 1;
  1961. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1962. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1963. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1964. GET_MQUANT();
  1965. s->current_picture.qscale_table[mb_pos] = mquant;
  1966. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1967. s->y_dc_scale = s->y_dc_scale_table[mquant];
  1968. s->c_dc_scale = s->c_dc_scale_table[mquant];
  1969. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1970. mb_has_coeffs = idx_mbmode & 1;
  1971. if (mb_has_coeffs)
  1972. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1973. dst_idx = 0;
  1974. for (i = 0; i < 6; i++) {
  1975. v->a_avail = v->c_avail = 0;
  1976. v->mb_type[0][s->block_index[i]] = 1;
  1977. s->dc_val[0][s->block_index[i]] = 0;
  1978. dst_idx += i >> 2;
  1979. val = ((cbp >> (5 - i)) & 1);
  1980. if (i == 2 || i == 3 || !s->first_slice_line)
  1981. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1982. if (i == 1 || i == 3 || s->mb_x)
  1983. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1984. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1985. (i & 4) ? v->codingset2 : v->codingset);
  1986. if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1987. continue;
  1988. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1989. if (v->rangeredfrm)
  1990. for (j = 0; j < 64; j++)
  1991. s->block[i][j] <<= 1;
  1992. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1993. s->idsp.put_signed_pixels_clamped(s->block[i],
  1994. s->dest[dst_idx] + off,
  1995. (i & 4) ? s->uvlinesize
  1996. : s->linesize);
1997. // TODO: loop filter not yet performed
  1998. }
  1999. } else {
  2000. s->mb_intra = v->is_intra[s->mb_x] = 0;
  2001. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  2002. for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
  2003. if (v->fmb_is_raw)
  2004. fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
  2005. else
  2006. fwd = v->forward_mb_plane[mb_pos];
  2007. if (idx_mbmode <= 5) { // 1-MV
  2008. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  2009. pred_flag[0] = pred_flag[1] = 0;
  2010. if (fwd)
  2011. bmvtype = BMV_TYPE_FORWARD;
  2012. else {
  2013. bmvtype = decode012(gb);
  2014. switch (bmvtype) {
  2015. case 0:
  2016. bmvtype = BMV_TYPE_BACKWARD;
  2017. break;
  2018. case 1:
  2019. bmvtype = BMV_TYPE_DIRECT;
  2020. break;
  2021. case 2:
  2022. bmvtype = BMV_TYPE_INTERPOLATED;
  2023. interpmvp = get_bits1(gb);
  2024. }
  2025. }
  2026. v->bmvtype = bmvtype;
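/* dmv_x/dmv_y/pred_flag use slot 0 for the forward MV and slot 1 for the
* backward MV; bmvtype == BMV_TYPE_BACKWARD selects which slot the decoded
* differential goes into. */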
  2027. if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
  2028. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  2029. }
  2030. if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
  2031. get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
  2032. }
  2033. if (bmvtype == BMV_TYPE_DIRECT) {
  2034. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
2035. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  2036. }
  2037. ff_vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
  2038. vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
  2039. mb_has_coeffs = !(idx_mbmode & 2);
  2040. } else { // 4-MV
  2041. if (fwd)
  2042. bmvtype = BMV_TYPE_FORWARD;
  2043. v->bmvtype = bmvtype;
  2044. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  2045. for (i = 0; i < 6; i++) {
  2046. if (i < 4) {
  2047. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  2048. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  2049. val = ((v->fourmvbp >> (3 - i)) & 1);
  2050. if (val) {
  2051. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
  2052. &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
  2053. &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  2054. }
  2055. ff_vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
  2056. ff_vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
  2057. } else if (i == 4)
  2058. ff_vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
  2059. }
  2060. mb_has_coeffs = idx_mbmode & 1;
  2061. }
  2062. if (mb_has_coeffs)
  2063. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2064. if (cbp) {
  2065. GET_MQUANT();
  2066. }
  2067. s->current_picture.qscale_table[mb_pos] = mquant;
  2068. if (!v->ttmbf && cbp) {
  2069. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  2070. }
  2071. dst_idx = 0;
  2072. for (i = 0; i < 6; i++) {
  2073. s->dc_val[0][s->block_index[i]] = 0;
  2074. dst_idx += i >> 2;
  2075. val = ((cbp >> (5 - i)) & 1);
  2076. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  2077. if (val) {
  2078. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2079. first_block, s->dest[dst_idx] + off,
  2080. (i & 4) ? s->uvlinesize : s->linesize,
  2081. (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
  2082. if (!v->ttmbf && ttmb < 8)
  2083. ttmb = -1;
  2084. first_block = 0;
  2085. }
  2086. }
  2087. }
  2088. }
  2089. /** Decode one B-frame MB (in interlaced frame B picture)
  2090. */
  2091. static int vc1_decode_b_mb_intfr(VC1Context *v)
  2092. {
  2093. MpegEncContext *s = &v->s;
  2094. GetBitContext *gb = &s->gb;
  2095. int i, j;
  2096. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2097. int cbp = 0; /* cbp decoding stuff */
  2098. int mqdiff, mquant; /* MB quantization */
  2099. int ttmb = v->ttfrm; /* MB Transform type */
  2100. int mvsw = 0; /* motion vector switch */
  2101. int mb_has_coeffs = 1; /* last_flag */
  2102. int dmv_x, dmv_y; /* Differential MV components */
  2103. int val; /* temp value */
  2104. int first_block = 1;
  2105. int dst_idx, off;
  2106. int skipped, direct, twomv = 0;
  2107. int block_cbp = 0, pat, block_tt = 0;
  2108. int idx_mbmode = 0, mvbp;
  2109. int stride_y, fieldtx;
  2110. int bmvtype = BMV_TYPE_BACKWARD;
  2111. int dir, dir2;
  2112. mquant = v->pq; /* Lossy initialization */
  2113. s->mb_intra = 0;
  2114. if (v->skip_is_raw)
  2115. skipped = get_bits1(gb);
  2116. else
  2117. skipped = v->s.mbskip_table[mb_pos];
  2118. if (!skipped) {
  2119. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
  2120. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  2121. twomv = 1;
  2122. v->blk_mv_type[s->block_index[0]] = 1;
  2123. v->blk_mv_type[s->block_index[1]] = 1;
  2124. v->blk_mv_type[s->block_index[2]] = 1;
  2125. v->blk_mv_type[s->block_index[3]] = 1;
  2126. } else {
  2127. v->blk_mv_type[s->block_index[0]] = 0;
  2128. v->blk_mv_type[s->block_index[1]] = 0;
  2129. v->blk_mv_type[s->block_index[2]] = 0;
  2130. v->blk_mv_type[s->block_index[3]] = 0;
  2131. }
  2132. }
  2133. if (v->dmb_is_raw)
  2134. direct = get_bits1(gb);
  2135. else
  2136. direct = v->direct_mb_plane[mb_pos];
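/* Direct mode: both MVs are derived by scaling the co-located MV of the
* next anchor picture with BFRACTION (dir = 0 gives the forward component,
* dir = 1 the backward one); no MV data is read from the bitstream. */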
  2137. if (direct) {
  2138. s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
  2139. s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
  2140. s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
  2141. s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
  2142. if (twomv) {
  2143. s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
  2144. s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
  2145. s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
  2146. s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
  2147. for (i = 1; i < 4; i += 2) {
  2148. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
  2149. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
  2150. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
  2151. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
  2152. }
  2153. } else {
  2154. for (i = 1; i < 4; i++) {
  2155. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
  2156. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
  2157. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
  2158. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
  2159. }
  2160. }
  2161. }
  2162. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  2163. for (i = 0; i < 4; i++) {
  2164. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
  2165. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
  2166. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  2167. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  2168. }
2169. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  2170. s->mb_intra = 1;
  2171. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2172. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  2173. mb_has_coeffs = get_bits1(gb);
  2174. if (mb_has_coeffs)
  2175. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2176. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  2177. GET_MQUANT();
  2178. s->current_picture.qscale_table[mb_pos] = mquant;
  2179. /* Set DC scale - y and c use the same (not sure if necessary here) */
  2180. s->y_dc_scale = s->y_dc_scale_table[mquant];
  2181. s->c_dc_scale = s->c_dc_scale_table[mquant];
  2182. dst_idx = 0;
  2183. for (i = 0; i < 6; i++) {
  2184. v->a_avail = v->c_avail = 0;
  2185. v->mb_type[0][s->block_index[i]] = 1;
  2186. s->dc_val[0][s->block_index[i]] = 0;
  2187. dst_idx += i >> 2;
  2188. val = ((cbp >> (5 - i)) & 1);
  2189. if (i == 2 || i == 3 || !s->first_slice_line)
  2190. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  2191. if (i == 1 || i == 3 || s->mb_x)
  2192. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  2193. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  2194. (i & 4) ? v->codingset2 : v->codingset);
  2195. if (i > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2196. continue;
  2197. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  2198. if (i < 4) {
  2199. stride_y = s->linesize << fieldtx;
  2200. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  2201. } else {
  2202. stride_y = s->uvlinesize;
  2203. off = 0;
  2204. }
  2205. s->idsp.put_signed_pixels_clamped(s->block[i],
  2206. s->dest[dst_idx] + off,
  2207. stride_y);
  2208. }
  2209. } else {
  2210. s->mb_intra = v->is_intra[s->mb_x] = 0;
  2211. if (!direct) {
  2212. if (skipped || !s->mb_intra) {
  2213. bmvtype = decode012(gb);
  2214. switch (bmvtype) {
  2215. case 0:
  2216. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  2217. break;
  2218. case 1:
  2219. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  2220. break;
  2221. case 2:
  2222. bmvtype = BMV_TYPE_INTERPOLATED;
  2223. }
  2224. }
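/* MVSW (motion vector switch): for non-interpolated 2MV macroblocks it
* flips the prediction direction used for the bottom pair of luma blocks
* relative to the top pair (see dir2 = !dir below). */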
  2225. if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
  2226. mvsw = get_bits1(gb);
  2227. }
  2228. if (!skipped) { // inter MB
  2229. mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
  2230. if (mb_has_coeffs)
  2231. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2232. if (!direct) {
  2233. if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
  2234. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  2235. } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
  2236. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  2237. }
  2238. }
  2239. for (i = 0; i < 6; i++)
  2240. v->mb_type[0][s->block_index[i]] = 0;
  2241. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
2242. /* for all motion vectors, read MVDATA and motion-compensate each block */
  2243. dst_idx = 0;
  2244. if (direct) {
  2245. if (twomv) {
  2246. for (i = 0; i < 4; i++) {
  2247. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  2248. ff_vc1_mc_4mv_luma(v, i, 1, 1);
  2249. }
  2250. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2251. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2252. } else {
  2253. ff_vc1_mc_1mv(v, 0);
  2254. ff_vc1_interp_mc(v);
  2255. }
  2256. } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
  2257. mvbp = v->fourmvbp;
  2258. for (i = 0; i < 4; i++) {
  2259. dir = i==1 || i==3;
  2260. dmv_x = dmv_y = 0;
  2261. val = ((mvbp >> (3 - i)) & 1);
  2262. if (val)
  2263. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2264. j = i > 1 ? 2 : 0;
  2265. ff_vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2266. ff_vc1_mc_4mv_luma(v, j, dir, dir);
  2267. ff_vc1_mc_4mv_luma(v, j+1, dir, dir);
  2268. }
  2269. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2270. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2271. } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2272. mvbp = v->twomvbp;
  2273. dmv_x = dmv_y = 0;
  2274. if (mvbp & 2)
  2275. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2276. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2277. ff_vc1_mc_1mv(v, 0);
  2278. dmv_x = dmv_y = 0;
  2279. if (mvbp & 1)
  2280. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2281. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2282. ff_vc1_interp_mc(v);
  2283. } else if (twomv) {
  2284. dir = bmvtype == BMV_TYPE_BACKWARD;
  2285. dir2 = dir;
  2286. if (mvsw)
  2287. dir2 = !dir;
  2288. mvbp = v->twomvbp;
  2289. dmv_x = dmv_y = 0;
  2290. if (mvbp & 2)
  2291. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2292. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2293. dmv_x = dmv_y = 0;
  2294. if (mvbp & 1)
  2295. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2296. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
  2297. if (mvsw) {
  2298. for (i = 0; i < 2; i++) {
  2299. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2300. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2301. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2302. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2303. }
  2304. } else {
  2305. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2306. ff_vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2307. }
  2308. ff_vc1_mc_4mv_luma(v, 0, dir, 0);
  2309. ff_vc1_mc_4mv_luma(v, 1, dir, 0);
  2310. ff_vc1_mc_4mv_luma(v, 2, dir2, 0);
  2311. ff_vc1_mc_4mv_luma(v, 3, dir2, 0);
  2312. ff_vc1_mc_4mv_chroma4(v, dir, dir2, 0);
  2313. } else {
  2314. dir = bmvtype == BMV_TYPE_BACKWARD;
  2315. mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
  2316. dmv_x = dmv_y = 0;
  2317. if (mvbp)
  2318. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2319. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2320. v->blk_mv_type[s->block_index[0]] = 1;
  2321. v->blk_mv_type[s->block_index[1]] = 1;
  2322. v->blk_mv_type[s->block_index[2]] = 1;
  2323. v->blk_mv_type[s->block_index[3]] = 1;
  2324. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2325. for (i = 0; i < 2; i++) {
  2326. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2327. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2328. }
  2329. ff_vc1_mc_1mv(v, dir);
  2330. }
  2331. if (cbp)
  2332. GET_MQUANT(); // p. 227
  2333. s->current_picture.qscale_table[mb_pos] = mquant;
  2334. if (!v->ttmbf && cbp)
  2335. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  2336. for (i = 0; i < 6; i++) {
  2337. s->dc_val[0][s->block_index[i]] = 0;
  2338. dst_idx += i >> 2;
  2339. val = ((cbp >> (5 - i)) & 1);
  2340. if (!fieldtx)
  2341. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  2342. else
  2343. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  2344. if (val) {
  2345. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2346. first_block, s->dest[dst_idx] + off,
  2347. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  2348. (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  2349. block_cbp |= pat << (i << 2);
  2350. if (!v->ttmbf && ttmb < 8)
  2351. ttmb = -1;
  2352. first_block = 0;
  2353. }
  2354. }
  2355. } else { // skipped
  2356. dir = 0;
  2357. for (i = 0; i < 6; i++) {
  2358. v->mb_type[0][s->block_index[i]] = 0;
  2359. s->dc_val[0][s->block_index[i]] = 0;
  2360. }
  2361. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  2362. s->current_picture.qscale_table[mb_pos] = 0;
  2363. v->blk_mv_type[s->block_index[0]] = 0;
  2364. v->blk_mv_type[s->block_index[1]] = 0;
  2365. v->blk_mv_type[s->block_index[2]] = 0;
  2366. v->blk_mv_type[s->block_index[3]] = 0;
  2367. if (!direct) {
  2368. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2369. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2370. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2371. } else {
  2372. dir = bmvtype == BMV_TYPE_BACKWARD;
  2373. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2374. if (mvsw) {
  2375. int dir2 = dir;
  2376. if (mvsw)
  2377. dir2 = !dir;
  2378. for (i = 0; i < 2; i++) {
  2379. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2380. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2381. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2382. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2383. }
  2384. } else {
  2385. v->blk_mv_type[s->block_index[0]] = 1;
  2386. v->blk_mv_type[s->block_index[1]] = 1;
  2387. v->blk_mv_type[s->block_index[2]] = 1;
  2388. v->blk_mv_type[s->block_index[3]] = 1;
  2389. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2390. for (i = 0; i < 2; i++) {
  2391. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2392. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2393. }
  2394. }
  2395. }
  2396. }
  2397. ff_vc1_mc_1mv(v, dir);
  2398. if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
  2399. ff_vc1_interp_mc(v);
  2400. }
  2401. }
  2402. }
  2403. if (s->mb_x == s->mb_width - 1)
  2404. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  2405. v->cbp[s->mb_x] = block_cbp;
  2406. v->ttblk[s->mb_x] = block_tt;
  2407. return 0;
  2408. }
  2409. /** Decode blocks of I-frame
  2410. */
  2411. static void vc1_decode_i_blocks(VC1Context *v)
  2412. {
  2413. int k, j;
  2414. MpegEncContext *s = &v->s;
  2415. int cbp, val;
  2416. uint8_t *coded_val;
  2417. int mb_pos;
  2418. /* select coding mode used for VLC tables selection */
  2419. switch (v->y_ac_table_index) {
  2420. case 0:
  2421. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2422. break;
  2423. case 1:
  2424. v->codingset = CS_HIGH_MOT_INTRA;
  2425. break;
  2426. case 2:
  2427. v->codingset = CS_MID_RATE_INTRA;
  2428. break;
  2429. }
  2430. switch (v->c_ac_table_index) {
  2431. case 0:
  2432. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2433. break;
  2434. case 1:
  2435. v->codingset2 = CS_HIGH_MOT_INTER;
  2436. break;
  2437. case 2:
  2438. v->codingset2 = CS_MID_RATE_INTER;
  2439. break;
  2440. }
  2441. /* Set DC scale - y and c use the same */
  2442. s->y_dc_scale = s->y_dc_scale_table[v->pq];
  2443. s->c_dc_scale = s->c_dc_scale_table[v->pq];
  2444. //do frame decode
  2445. s->mb_x = s->mb_y = 0;
  2446. s->mb_intra = 1;
  2447. s->first_slice_line = 1;
  2448. for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
  2449. s->mb_x = 0;
  2450. init_block_index(v);
  2451. for (; s->mb_x < v->end_mb_x; s->mb_x++) {
  2452. uint8_t *dst[6];
  2453. ff_update_block_index(s);
  2454. dst[0] = s->dest[0];
  2455. dst[1] = dst[0] + 8;
  2456. dst[2] = s->dest[0] + s->linesize * 8;
  2457. dst[3] = dst[2] + 8;
  2458. dst[4] = s->dest[1];
  2459. dst[5] = s->dest[2];
  2460. s->bdsp.clear_blocks(s->block[0]);
  2461. mb_pos = s->mb_x + s->mb_y * s->mb_width;
  2462. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2463. s->current_picture.qscale_table[mb_pos] = v->pq;
  2464. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  2465. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  2466. // do actual MB decoding and displaying
  2467. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2468. v->s.ac_pred = get_bits1(&v->s.gb);
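/* For the four luma blocks the coded flag is predicted from already decoded
* neighbours, so the transmitted bit is a difference: XOR it with the
* prediction and store the result for later blocks, then rebuild cbp with
* the actual coded flags. */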
  2469. for (k = 0; k < 6; k++) {
  2470. val = ((cbp >> (5 - k)) & 1);
  2471. if (k < 4) {
  2472. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2473. val = val ^ pred;
  2474. *coded_val = val;
  2475. }
  2476. cbp |= val << (5 - k);
  2477. vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
  2478. if (k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2479. continue;
  2480. v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
  2481. if (v->pq >= 9 && v->overlap) {
  2482. if (v->rangeredfrm)
  2483. for (j = 0; j < 64; j++)
  2484. s->block[k][j] <<= 1;
  2485. s->idsp.put_signed_pixels_clamped(s->block[k], dst[k],
  2486. k & 4 ? s->uvlinesize
  2487. : s->linesize);
  2488. } else {
  2489. if (v->rangeredfrm)
  2490. for (j = 0; j < 64; j++)
  2491. s->block[k][j] = (s->block[k][j] - 64) << 1;
  2492. s->idsp.put_pixels_clamped(s->block[k], dst[k],
  2493. k & 4 ? s->uvlinesize
  2494. : s->linesize);
  2495. }
  2496. }
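/* Overlap smoothing: with pq >= 9 and OVERLAP set, the block edges inside
* the macroblock are always filtered, while the edges shared with the left
* and top macroblocks are filtered only when those neighbours exist
* (s->mb_x != 0, !s->first_slice_line). */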
  2497. if (v->pq >= 9 && v->overlap) {
  2498. if (s->mb_x) {
  2499. v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
  2500. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
  2501. if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  2502. v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
  2503. v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
  2504. }
  2505. }
  2506. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
  2507. v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
  2508. if (!s->first_slice_line) {
  2509. v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
  2510. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
  2511. if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
  2512. v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
  2513. v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
  2514. }
  2515. }
  2516. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
  2517. v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
  2518. }
  2519. if (v->s.loop_filter)
  2520. ff_vc1_loop_filter_iblk(v, v->pq);
  2521. if (get_bits_count(&s->gb) > v->bits) {
  2522. ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
  2523. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2524. get_bits_count(&s->gb), v->bits);
  2525. return;
  2526. }
  2527. }
  2528. if (!v->s.loop_filter)
  2529. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2530. else if (s->mb_y)
  2531. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2532. s->first_slice_line = 0;
  2533. }
  2534. if (v->s.loop_filter)
  2535. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2536. /* This is intentionally mb_height and not end_mb_y - unlike in advanced
2537. * profile, these only differ when decoding MSS2 rectangles. */
  2538. ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
  2539. }
  2540. /** Decode blocks of I-frame for advanced profile
  2541. */
  2542. static void vc1_decode_i_blocks_adv(VC1Context *v)
  2543. {
  2544. int k;
  2545. MpegEncContext *s = &v->s;
  2546. int cbp, val;
  2547. uint8_t *coded_val;
  2548. int mb_pos;
  2549. int mquant = v->pq;
  2550. int mqdiff;
  2551. GetBitContext *gb = &s->gb;
  2552. /* select coding mode used for VLC tables selection */
  2553. switch (v->y_ac_table_index) {
  2554. case 0:
  2555. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2556. break;
  2557. case 1:
  2558. v->codingset = CS_HIGH_MOT_INTRA;
  2559. break;
  2560. case 2:
  2561. v->codingset = CS_MID_RATE_INTRA;
  2562. break;
  2563. }
  2564. switch (v->c_ac_table_index) {
  2565. case 0:
  2566. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2567. break;
  2568. case 1:
  2569. v->codingset2 = CS_HIGH_MOT_INTER;
  2570. break;
  2571. case 2:
  2572. v->codingset2 = CS_MID_RATE_INTER;
  2573. break;
  2574. }
  2575. // do frame decode
  2576. s->mb_x = s->mb_y = 0;
  2577. s->mb_intra = 1;
  2578. s->first_slice_line = 1;
  2579. s->mb_y = s->start_mb_y;
  2580. if (s->start_mb_y) {
  2581. s->mb_x = 0;
  2582. init_block_index(v);
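/* When this slice does not start at the top of the picture, clear the row
* of coded-block flags above it so that CBP prediction in the first slice
* row does not reuse state from the previous slice. */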
  2583. memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
  2584. (1 + s->b8_stride) * sizeof(*s->coded_block));
  2585. }
  2586. for (; s->mb_y < s->end_mb_y; s->mb_y++) {
  2587. s->mb_x = 0;
  2588. init_block_index(v);
  2589. for (;s->mb_x < s->mb_width; s->mb_x++) {
  2590. int16_t (*block)[64] = v->block[v->cur_blk_idx];
  2591. ff_update_block_index(s);
  2592. s->bdsp.clear_blocks(block[0]);
  2593. mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2594. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  2595. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  2596. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  2597. // do actual MB decoding and displaying
  2598. if (v->fieldtx_is_raw)
  2599. v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
  2600. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2601. if (v->acpred_is_raw)
  2602. v->s.ac_pred = get_bits1(&v->s.gb);
  2603. else
  2604. v->s.ac_pred = v->acpred_plane[mb_pos];
  2605. if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
  2606. v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
  2607. GET_MQUANT();
  2608. s->current_picture.qscale_table[mb_pos] = mquant;
  2609. /* Set DC scale - y and c use the same */
  2610. s->y_dc_scale = s->y_dc_scale_table[mquant];
  2611. s->c_dc_scale = s->c_dc_scale_table[mquant];
  2612. for (k = 0; k < 6; k++) {
  2613. val = ((cbp >> (5 - k)) & 1);
  2614. if (k < 4) {
  2615. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2616. val = val ^ pred;
  2617. *coded_val = val;
  2618. }
  2619. cbp |= val << (5 - k);
  2620. v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
  2621. v->c_avail = !!s->mb_x || (k == 1 || k == 3);
  2622. vc1_decode_i_block_adv(v, block[k], k, val,
  2623. (k < 4) ? v->codingset : v->codingset2, mquant);
  2624. if (k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2625. continue;
  2626. v->vc1dsp.vc1_inv_trans_8x8(block[k]);
  2627. }
  2628. ff_vc1_smooth_overlap_filter_iblk(v);
  2629. vc1_put_signed_blocks_clamped(v);
  2630. if (v->s.loop_filter)
  2631. ff_vc1_loop_filter_iblk_delayed(v, v->pq);
  2632. if (get_bits_count(&s->gb) > v->bits) {
  2633. // TODO: may need modification to handle slice coding
  2634. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2635. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2636. get_bits_count(&s->gb), v->bits);
  2637. return;
  2638. }
  2639. }
  2640. if (!v->s.loop_filter)
  2641. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2642. else if (s->mb_y)
  2643. ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
  2644. s->first_slice_line = 0;
  2645. }
  2646. /* raw bottom MB row */
  2647. s->mb_x = 0;
  2648. init_block_index(v);
  2649. for (;s->mb_x < s->mb_width; s->mb_x++) {
  2650. ff_update_block_index(s);
  2651. vc1_put_signed_blocks_clamped(v);
  2652. if (v->s.loop_filter)
  2653. ff_vc1_loop_filter_iblk_delayed(v, v->pq);
  2654. }
  2655. if (v->s.loop_filter)
  2656. ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
  2657. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2658. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2659. }
  2660. static void vc1_decode_p_blocks(VC1Context *v)
  2661. {
  2662. MpegEncContext *s = &v->s;
  2663. int apply_loop_filter;
  2664. /* select coding mode used for VLC tables selection */
  2665. switch (v->c_ac_table_index) {
  2666. case 0:
  2667. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2668. break;
  2669. case 1:
  2670. v->codingset = CS_HIGH_MOT_INTRA;
  2671. break;
  2672. case 2:
  2673. v->codingset = CS_MID_RATE_INTRA;
  2674. break;
  2675. }
  2676. switch (v->c_ac_table_index) {
  2677. case 0:
  2678. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2679. break;
  2680. case 1:
  2681. v->codingset2 = CS_HIGH_MOT_INTER;
  2682. break;
  2683. case 2:
  2684. v->codingset2 = CS_MID_RATE_INTER;
  2685. break;
  2686. }
  2687. apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
  2688. v->fcm == PROGRESSIVE;
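/* The in-loop deblocking below is only applied to progressive P pictures
* and is skipped entirely when the caller asked to drop the loop filter for
* non-key frames. */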
  2689. s->first_slice_line = 1;
  2690. memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
  2691. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2692. s->mb_x = 0;
  2693. init_block_index(v);
  2694. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2695. ff_update_block_index(s);
  2696. if (v->fcm == ILACE_FIELD)
  2697. vc1_decode_p_mb_intfi(v);
  2698. else if (v->fcm == ILACE_FRAME)
  2699. vc1_decode_p_mb_intfr(v);
  2700. else vc1_decode_p_mb(v);
  2701. if (s->mb_y != s->start_mb_y && apply_loop_filter)
  2702. ff_vc1_apply_p_loop_filter(v);
  2703. if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
  2704. // TODO: may need modification to handle slice coding
  2705. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2706. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2707. get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
  2708. return;
  2709. }
  2710. }
  2711. memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
  2712. memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
  2713. memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
  2714. memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
  2715. if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2716. s->first_slice_line = 0;
  2717. }
  2718. if (apply_loop_filter) {
  2719. s->mb_x = 0;
  2720. init_block_index(v);
  2721. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2722. ff_update_block_index(s);
  2723. ff_vc1_apply_p_loop_filter(v);
  2724. }
  2725. }
  2726. if (s->end_mb_y >= s->start_mb_y)
  2727. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2728. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2729. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2730. }
  2731. static void vc1_decode_b_blocks(VC1Context *v)
  2732. {
  2733. MpegEncContext *s = &v->s;
  2734. /* select coding mode used for VLC tables selection */
  2735. switch (v->c_ac_table_index) {
  2736. case 0:
  2737. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2738. break;
  2739. case 1:
  2740. v->codingset = CS_HIGH_MOT_INTRA;
  2741. break;
  2742. case 2:
  2743. v->codingset = CS_MID_RATE_INTRA;
  2744. break;
  2745. }
  2746. switch (v->c_ac_table_index) {
  2747. case 0:
  2748. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2749. break;
  2750. case 1:
  2751. v->codingset2 = CS_HIGH_MOT_INTER;
  2752. break;
  2753. case 2:
  2754. v->codingset2 = CS_MID_RATE_INTER;
  2755. break;
  2756. }
  2757. s->first_slice_line = 1;
  2758. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2759. s->mb_x = 0;
  2760. init_block_index(v);
  2761. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2762. ff_update_block_index(s);
  2763. if (v->fcm == ILACE_FIELD)
  2764. vc1_decode_b_mb_intfi(v);
  2765. else if (v->fcm == ILACE_FRAME)
  2766. vc1_decode_b_mb_intfr(v);
  2767. else
  2768. vc1_decode_b_mb(v);
  2769. if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
  2770. // TODO: may need modification to handle slice coding
  2771. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2772. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2773. get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
  2774. return;
  2775. }
  2776. if (v->s.loop_filter)
  2777. ff_vc1_loop_filter_iblk(v, v->pq);
  2778. }
  2779. if (!v->s.loop_filter)
  2780. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2781. else if (s->mb_y)
  2782. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2783. s->first_slice_line = 0;
  2784. }
  2785. if (v->s.loop_filter)
  2786. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2787. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2788. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2789. }
  2790. static void vc1_decode_skip_blocks(VC1Context *v)
  2791. {
  2792. MpegEncContext *s = &v->s;
  2793. if (!v->s.last_picture.f->data[0])
  2794. return;
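/* A skipped P picture carries no macroblock data: reconstruct it by copying
* the previous picture into the current one, one macroblock row at a time. */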
  2795. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
  2796. s->first_slice_line = 1;
  2797. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2798. s->mb_x = 0;
  2799. init_block_index(v);
  2800. ff_update_block_index(s);
  2801. memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
  2802. memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2803. memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2804. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2805. s->first_slice_line = 0;
  2806. }
  2807. s->pict_type = AV_PICTURE_TYPE_P;
  2808. }
  2809. void ff_vc1_decode_blocks(VC1Context *v)
  2810. {
  2811. v->s.esc3_level_length = 0;
  2812. if (v->x8_type) {
  2813. ff_intrax8_decode_picture(&v->x8, &v->s.current_picture,
  2814. &v->s.gb, &v->s.mb_x, &v->s.mb_y,
  2815. 2 * v->pq + v->halfpq, v->pq * !v->pquantizer,
  2816. v->s.loop_filter, v->s.low_delay);
  2817. ff_er_add_slice(&v->s.er, 0, 0,
  2818. (v->s.mb_x >> 1) - 1, (v->s.mb_y >> 1) - 1,
  2819. ER_MB_END);
  2820. } else {
  2821. v->cur_blk_idx = 0;
  2822. v->left_blk_idx = -1;
  2823. v->topleft_blk_idx = 1;
  2824. v->top_blk_idx = 2;
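/* Indices into v->block[]: the advanced-profile I-picture path buffers the
* coefficients of the current macroblock and of its left, top-left and top
* neighbours so that overlap smoothing can be applied before the blocks are
* written out. */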
  2825. switch (v->s.pict_type) {
  2826. case AV_PICTURE_TYPE_I:
  2827. if (v->profile == PROFILE_ADVANCED)
  2828. vc1_decode_i_blocks_adv(v);
  2829. else
  2830. vc1_decode_i_blocks(v);
  2831. break;
  2832. case AV_PICTURE_TYPE_P:
  2833. if (v->p_frame_skipped)
  2834. vc1_decode_skip_blocks(v);
  2835. else
  2836. vc1_decode_p_blocks(v);
  2837. break;
  2838. case AV_PICTURE_TYPE_B:
  2839. if (v->bi_type) {
  2840. if (v->profile == PROFILE_ADVANCED)
  2841. vc1_decode_i_blocks_adv(v);
  2842. else
  2843. vc1_decode_i_blocks(v);
  2844. } else
  2845. vc1_decode_b_blocks(v);
  2846. break;
  2847. }
  2848. }
  2849. }