  1. /*
  2. * VC-1 and WMV3 decoder
  3. * Copyright (c) 2011 Mashiat Sarker Shakkhar
  4. * Copyright (c) 2006-2007 Konstantin Shishkov
  5. * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * VC-1 and WMV3 block decoding routines
  26. */
  27. #include "avcodec.h"
  28. #include "mpegutils.h"
  29. #include "mpegvideo.h"
  30. #include "msmpeg4data.h"
  31. #include "unary.h"
  32. #include "vc1.h"
  33. #include "vc1_pred.h"
  34. #include "vc1acdata.h"
  35. #include "vc1data.h"
  36. #define MB_INTRA_VLC_BITS 9
  37. #define DC_VLC_BITS 9
  38. // offset tables for interlaced picture MVDATA decoding
  39. static const uint8_t offset_table[2][9] = {
  40. { 0, 1, 2, 4, 8, 16, 32, 64, 128 },
  41. { 0, 1, 3, 7, 15, 31, 63, 127, 255 },
  42. };
  43. // mapping table for internal block representation
  44. static const int block_map[6] = {0, 2, 1, 3, 4, 5};
  45. /***********************************************************************/
  46. /**
  47. * @name VC-1 Bitplane decoding
  48. * @see 8.7, p56
  49. * @{
  50. */
  51. static inline void init_block_index(VC1Context *v)
  52. {
  53. MpegEncContext *s = &v->s;
  54. ff_init_block_index(s);
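/* For field pictures, shift all three plane pointers down one line when the
 * field being decoded is the bottom field, so that both fields end up
 * interleaved in the same frame buffer. */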
  55. if (v->field_mode && !(v->second_field ^ v->tff)) {
  56. s->dest[0] += s->current_picture_ptr->f->linesize[0];
  57. s->dest[1] += s->current_picture_ptr->f->linesize[1];
  58. s->dest[2] += s->current_picture_ptr->f->linesize[2];
  59. }
  60. }
  61. /** @} */ // Bitplane group
  62. static void vc1_put_blocks_clamped(VC1Context *v, int put_signed)
  63. {
  64. MpegEncContext *s = &v->s;
  65. uint8_t *dest;
  66. int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
  67. int fieldtx = 0;
  68. int i;
  69. /* The put pixels loop is one MB row and one MB column behind the decoding
  70. * loop because we can only put pixels when overlap filtering is done. For
  71. * interlaced frame pictures, however, the put pixels loop is only one
  72. * column behind the decoding loop as interlaced frame pictures only need
  73. * horizontal overlap filtering. */
  74. if (!s->first_slice_line && v->fcm != ILACE_FRAME) {
  75. if (s->mb_x) {
  76. for (i = 0; i < block_count; i++) {
  77. if (i > 3 ? v->mb_type[0][s->block_index[i] - s->block_wrap[i] - 1] :
  78. v->mb_type[0][s->block_index[i] - 2 * s->block_wrap[i] - 2]) {
  79. dest = s->dest[0] + ((i & 2) - 4) * 4 * s->linesize + ((i & 1) - 2) * 8;
  80. if (put_signed)
  81. s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][block_map[i]],
  82. i > 3 ? s->dest[i - 3] - 8 * s->uvlinesize - 8 : dest,
  83. i > 3 ? s->uvlinesize : s->linesize);
  84. else
  85. s->idsp.put_pixels_clamped(v->block[v->topleft_blk_idx][block_map[i]],
  86. i > 3 ? s->dest[i - 3] - 8 * s->uvlinesize - 8 : dest,
  87. i > 3 ? s->uvlinesize : s->linesize);
  88. }
  89. }
  90. }
  91. if (s->mb_x == v->end_mb_x - 1) {
  92. for (i = 0; i < block_count; i++) {
  93. if (i > 3 ? v->mb_type[0][s->block_index[i] - s->block_wrap[i]] :
  94. v->mb_type[0][s->block_index[i] - 2 * s->block_wrap[i]]) {
  95. dest = s->dest[0] + ((i & 2) - 4) * 4 * s->linesize + (i & 1) * 8;
  96. if (put_signed)
  97. s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][block_map[i]],
  98. i > 3 ? s->dest[i - 3] - 8 * s->uvlinesize : dest,
  99. i > 3 ? s->uvlinesize : s->linesize);
  100. else
  101. s->idsp.put_pixels_clamped(v->block[v->top_blk_idx][block_map[i]],
  102. i > 3 ? s->dest[i - 3] - 8 * s->uvlinesize : dest,
  103. i > 3 ? s->uvlinesize : s->linesize);
  104. }
  105. }
  106. }
  107. }
  108. if (s->mb_y == s->end_mb_y - 1 || v->fcm == ILACE_FRAME) {
  109. if (s->mb_x) {
  110. if (v->fcm == ILACE_FRAME)
  111. fieldtx = v->fieldtx_plane[s->mb_y * s->mb_stride + s->mb_x - 1];
  112. for (i = 0; i < block_count; i++) {
  113. if (i > 3 ? v->mb_type[0][s->block_index[i] - 1] :
  114. v->mb_type[0][s->block_index[i] - 2]) {
  115. if (fieldtx)
  116. dest = s->dest[0] + ((i & 2) >> 1) * s->linesize + ((i & 1) - 2) * 8;
  117. else
  118. dest = s->dest[0] + (i & 2) * 4 * s->linesize + ((i & 1) - 2) * 8;
  119. if (put_signed)
  120. s->idsp.put_signed_pixels_clamped(v->block[v->left_blk_idx][block_map[i]],
  121. i > 3 ? s->dest[i - 3] - 8 : dest,
  122. i > 3 ? s->uvlinesize : s->linesize << fieldtx);
  123. else
  124. s->idsp.put_pixels_clamped(v->block[v->left_blk_idx][block_map[i]],
  125. i > 3 ? s->dest[i - 3] - 8 : dest,
  126. i > 3 ? s->uvlinesize : s->linesize << fieldtx);
  127. }
  128. }
  129. }
  130. if (s->mb_x == v->end_mb_x - 1) {
  131. if (v->fcm == ILACE_FRAME)
  132. fieldtx = v->fieldtx_plane[s->mb_y * s->mb_stride + s->mb_x];
  133. for (i = 0; i < block_count; i++) {
  134. if (v->mb_type[0][s->block_index[i]]) {
  135. if (fieldtx)
  136. dest = s->dest[0] + ((i & 2) >> 1) * s->linesize + (i & 1) * 8;
  137. else
  138. dest = s->dest[0] + (i & 2) * 4 * s->linesize + (i & 1) * 8;
  139. if (put_signed)
  140. s->idsp.put_signed_pixels_clamped(v->block[v->cur_blk_idx][block_map[i]],
  141. i > 3 ? s->dest[i - 3] : dest,
  142. i > 3 ? s->uvlinesize : s->linesize << fieldtx);
  143. else
  144. s->idsp.put_pixels_clamped(v->block[v->cur_blk_idx][block_map[i]],
  145. i > 3 ? s->dest[i - 3] : dest,
  146. i > 3 ? s->uvlinesize : s->linesize << fieldtx);
  147. }
  148. }
  149. }
  150. }
  151. }
  152. #define inc_blk_idx(idx) do { \
  153. idx++; \
  154. if (idx >= v->n_allocated_blks) \
  155. idx = 0; \
  156. } while (0)
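/* Advances a block-buffer index with wraparound at v->n_allocated_blks; the
 * cur/left/top/topleft block indices used above rotate through this small
 * ring of 8x8 block buffers as decoding proceeds. */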
  157. /***********************************************************************/
  158. /**
  159. * @name VC-1 Block-level functions
  160. * @see 7.1.4, p91 and 8.1.1.7, p104
  161. * @{
  162. */
  163. /**
  164. * @def GET_MQUANT
  165. * @brief Get macroblock-level quantizer scale
  166. */
  167. #define GET_MQUANT() \
  168. if (v->dquantfrm) { \
  169. int edges = 0; \
  170. if (v->dqprofile == DQPROFILE_ALL_MBS) { \
  171. if (v->dqbilevel) { \
  172. mquant = (get_bits1(gb)) ? -v->altpq : v->pq; \
  173. } else { \
  174. mqdiff = get_bits(gb, 3); \
  175. if (mqdiff != 7) \
  176. mquant = -v->pq - mqdiff; \
  177. else \
  178. mquant = -get_bits(gb, 5); \
  179. } \
  180. } \
  181. if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
  182. edges = 1 << v->dqsbedge; \
  183. else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
  184. edges = (3 << v->dqsbedge) % 15; \
  185. else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
  186. edges = 15; \
  187. if ((edges&1) && !s->mb_x) \
  188. mquant = -v->altpq; \
  189. if ((edges&2) && !s->mb_y) \
  190. mquant = -v->altpq; \
  191. if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
  192. mquant = -v->altpq; \
  193. if ((edges&8) && \
  194. s->mb_y == ((s->mb_height >> v->field_mode) - 1)) \
  195. mquant = -v->altpq; \
  196. if (!mquant || mquant > 31 || mquant < -31) { \
  197. av_log(v->s.avctx, AV_LOG_ERROR, \
  198. "Overriding invalid mquant %d\n", mquant); \
  199. mquant = 1; \
  200. } \
  201. }
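/* Usage sketch: GET_MQUANT() is expanded inside the MB decoders below (e.g.
 * vc1_decode_p_mb()) and expects the caller to declare `mquant` and `mqdiff`
 * and to have `v`, `s` and `gb` in scope. A negative mquant flags a
 * macroblock-level override, which is why consumers take FFABS(mquant) and
 * drop halfpq in that case. */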
  202. /**
  203. * @def GET_MVDATA(_dmv_x, _dmv_y)
  204. * @brief Get MV differentials
  205. * @see MVDATA decoding from 8.3.5.2, p120
  206. * @param _dmv_x Horizontal differential for decoded MV
  207. * @param _dmv_y Vertical differential for decoded MV
  208. */
  209. #define GET_MVDATA(_dmv_x, _dmv_y) \
  210. index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
  211. VC1_MV_DIFF_VLC_BITS, 2); \
  212. if (index > 36) { \
  213. mb_has_coeffs = 1; \
  214. index -= 37; \
  215. } else \
  216. mb_has_coeffs = 0; \
  217. s->mb_intra = 0; \
  218. if (!index) { \
  219. _dmv_x = _dmv_y = 0; \
  220. } else if (index == 35) { \
  221. _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
  222. _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
  223. } else if (index == 36) { \
  224. _dmv_x = 0; \
  225. _dmv_y = 0; \
  226. s->mb_intra = 1; \
  227. } else { \
  228. index1 = index % 6; \
  229. _dmv_x = offset_table[1][index1]; \
  230. val = size_table[index1] - (!s->quarter_sample && index1 == 5); \
  231. if (val > 0) { \
  232. val = get_bits(gb, val); \
  233. sign = 0 - (val & 1); \
  234. _dmv_x = (sign ^ ((val >> 1) + _dmv_x)) - sign; \
  235. } \
  236. \
  237. index1 = index / 6; \
  238. _dmv_y = offset_table[1][index1]; \
  239. val = size_table[index1] - (!s->quarter_sample && index1 == 5); \
  240. if (val > 0) { \
  241. val = get_bits(gb, val); \
  242. sign = 0 - (val & 1); \
  243. _dmv_y = (sign ^ ((val >> 1) + _dmv_y)) - sign; \
  244. } \
  245. }
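/* Usage sketch: GET_MVDATA(dmv_x, dmv_y) is invoked from vc1_decode_p_mb()
 * and relies on the caller-declared locals `index`, `index1`, `val`, `sign`
 * and `mb_has_coeffs`. Index 35 switches to a raw fixed-length MV read and
 * index 36 marks an intra block with no MV data. */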
  246. static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
  247. int *dmv_y, int *pred_flag)
  248. {
  249. int index, index1;
  250. int extend_x, extend_y;
  251. GetBitContext *gb = &v->s.gb;
  252. int bits, esc;
  253. int val, sign;
  254. if (v->numref) {
  255. bits = VC1_2REF_MVDATA_VLC_BITS;
  256. esc = 125;
  257. } else {
  258. bits = VC1_1REF_MVDATA_VLC_BITS;
  259. esc = 71;
  260. }
  261. extend_x = v->dmvrange & 1;
  262. extend_y = (v->dmvrange >> 1) & 1;
  263. index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
  264. if (index == esc) {
  265. *dmv_x = get_bits(gb, v->k_x);
  266. *dmv_y = get_bits(gb, v->k_y);
  267. if (v->numref) {
  268. if (pred_flag)
  269. *pred_flag = *dmv_y & 1;
  270. *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
  271. }
  272. }
  273. else {
  274. av_assert0(index < esc);
  275. index1 = (index + 1) % 9;
  276. if (index1 != 0) {
  277. val = get_bits(gb, index1 + extend_x);
  278. sign = 0 - (val & 1);
  279. *dmv_x = (sign ^ ((val >> 1) + offset_table[extend_x][index1])) - sign;
  280. } else
  281. *dmv_x = 0;
  282. index1 = (index + 1) / 9;
  283. if (index1 > v->numref) {
  284. val = get_bits(gb, (index1 >> v->numref) + extend_y);
  285. sign = 0 - (val & 1);
  286. *dmv_y = (sign ^ ((val >> 1) + offset_table[extend_y][index1 >> v->numref])) - sign;
  287. } else
  288. *dmv_y = 0;
  289. if (v->numref && pred_flag)
  290. *pred_flag = index1 & 1;
  291. }
  292. }
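/* The sign handling above is branchless: sign = -(val & 1) is either 0 or
 * all ones, so (sign ^ magnitude) - sign yields +magnitude when the LSB of
 * val is clear and -magnitude when it is set. */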
  293. /** Reconstruct motion vector for B-frame and do motion compensation
  294. */
  295. static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
  296. int direct, int mode)
  297. {
  298. if (direct) {
  299. ff_vc1_mc_1mv(v, 0);
  300. ff_vc1_interp_mc(v);
  301. return;
  302. }
  303. if (mode == BMV_TYPE_INTERPOLATED) {
  304. ff_vc1_mc_1mv(v, 0);
  305. ff_vc1_interp_mc(v);
  306. return;
  307. }
  308. ff_vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
  309. }
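/* Roughly: both the direct and the interpolated cases blend a forward
 * prediction (ff_vc1_mc_1mv) with a backward one (ff_vc1_interp_mc); plain
 * forward or backward B MVs fall through to a single 1-MV motion
 * compensation. */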
  310. /** Get predicted DC value for I-frames only
  311. * prediction dir: left=0, top=1
  312. * @param s MpegEncContext
  313. * @param overlap flag indicating that overlap filtering is used
  314. * @param pq integer part of picture quantizer
  315. * @param[in] n block index in the current MB
  316. * @param dc_val_ptr Pointer to DC predictor
  317. * @param dir_ptr Prediction direction for use in AC prediction
  318. */
  319. static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  320. int16_t **dc_val_ptr, int *dir_ptr)
  321. {
  322. int a, b, c, wrap, pred, scale;
  323. int16_t *dc_val;
  324. static const uint16_t dcpred[32] = {
  325. -1, 1024, 512, 341, 256, 205, 171, 146, 128,
  326. 114, 102, 93, 85, 79, 73, 68, 64,
  327. 60, 57, 54, 51, 49, 47, 45, 43,
  328. 41, 39, 38, 37, 35, 34, 33
  329. };
  330. /* find prediction - wmv3_dc_scale always used here in fact */
  331. if (n < 4) scale = s->y_dc_scale;
  332. else scale = s->c_dc_scale;
  333. wrap = s->block_wrap[n];
  334. dc_val = s->dc_val[0] + s->block_index[n];
  335. /* B A
  336. * C X
  337. */
  338. c = dc_val[ - 1];
  339. b = dc_val[ - 1 - wrap];
  340. a = dc_val[ - wrap];
  341. if (pq < 9 || !overlap) {
  342. /* Set outer values */
  343. if (s->first_slice_line && (n != 2 && n != 3))
  344. b = a = dcpred[scale];
  345. if (s->mb_x == 0 && (n != 1 && n != 3))
  346. b = c = dcpred[scale];
  347. } else {
  348. /* Set outer values */
  349. if (s->first_slice_line && (n != 2 && n != 3))
  350. b = a = 0;
  351. if (s->mb_x == 0 && (n != 1 && n != 3))
  352. b = c = 0;
  353. }
  354. if (abs(a - b) <= abs(b - c)) {
  355. pred = c;
  356. *dir_ptr = 1; // left
  357. } else {
  358. pred = a;
  359. *dir_ptr = 0; // top
  360. }
  361. /* update predictor */
  362. *dc_val_ptr = &dc_val[0];
  363. return pred;
  364. }
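/* Direction choice: the DC predictor is taken from the left neighbour C when
 * the gradient along the top row, |A - B|, is no larger than the gradient
 * down the left column, |B - C|; otherwise it comes from the top neighbour A
 * (layout as in the B A / C X diagram above). */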
  365. /** Get predicted DC value
  366. * prediction dir: left=0, top=1
  367. * @param s MpegEncContext
  368. * @param overlap flag indicating that overlap filtering is used
  369. * @param pq integer part of picture quantizer
  370. * @param[in] n block index in the current MB
  371. * @param a_avail flag indicating top block availability
  372. * @param c_avail flag indicating left block availability
  373. * @param dc_val_ptr Pointer to DC predictor
  374. * @param dir_ptr Prediction direction for use in AC prediction
  375. */
  376. static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  377. int a_avail, int c_avail,
  378. int16_t **dc_val_ptr, int *dir_ptr)
  379. {
  380. int a, b, c, wrap, pred;
  381. int16_t *dc_val;
  382. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  383. int q1, q2 = 0;
  384. int dqscale_index;
  385. /* scale predictors if needed */
  386. q1 = FFABS(s->current_picture.qscale_table[mb_pos]);
  387. dqscale_index = s->y_dc_scale_table[q1] - 1;
  388. if (dqscale_index < 0)
  389. return 0;
  390. wrap = s->block_wrap[n];
  391. dc_val = s->dc_val[0] + s->block_index[n];
  392. /* B A
  393. * C X
  394. */
  395. c = dc_val[ - 1];
  396. b = dc_val[ - 1 - wrap];
  397. a = dc_val[ - wrap];
  398. if (c_avail && (n != 1 && n != 3)) {
  399. q2 = FFABS(s->current_picture.qscale_table[mb_pos - 1]);
  400. if (q2 && q2 != q1)
  401. c = (int)((unsigned)c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  402. }
  403. if (a_avail && (n != 2 && n != 3)) {
  404. q2 = FFABS(s->current_picture.qscale_table[mb_pos - s->mb_stride]);
  405. if (q2 && q2 != q1)
  406. a = (int)((unsigned)a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  407. }
  408. if (a_avail && c_avail && (n != 3)) {
  409. int off = mb_pos;
  410. if (n != 1)
  411. off--;
  412. if (n != 2)
  413. off -= s->mb_stride;
  414. q2 = FFABS(s->current_picture.qscale_table[off]);
  415. if (q2 && q2 != q1)
  416. b = (int)((unsigned)b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
  417. }
  418. if (c_avail && (!a_avail || abs(a - b) <= abs(b - c))) {
  419. pred = c;
  420. *dir_ptr = 1; // left
  421. } else if (a_avail) {
  422. pred = a;
  423. *dir_ptr = 0; // top
  424. } else {
  425. pred = 0;
  426. *dir_ptr = 1; // left
  427. }
  428. /* update predictor */
  429. *dc_val_ptr = &dc_val[0];
  430. return pred;
  431. }
  432. /** @} */ // Block group
  433. /**
  434. * @name VC1 Macroblock-level functions in Simple/Main Profiles
  435. * @see 7.1.4, p91 and 8.1.1.7, p104
  436. * @{
  437. */
  438. static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
  439. uint8_t **coded_block_ptr)
  440. {
  441. int xy, wrap, pred, a, b, c;
  442. xy = s->block_index[n];
  443. wrap = s->b8_stride;
  444. /* B C
  445. * A X
  446. */
  447. a = s->coded_block[xy - 1 ];
  448. b = s->coded_block[xy - 1 - wrap];
  449. c = s->coded_block[xy - wrap];
  450. if (b == c) {
  451. pred = a;
  452. } else {
  453. pred = c;
  454. }
  455. /* store value */
  456. *coded_block_ptr = &s->coded_block[xy];
  457. return pred;
  458. }
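/* Prediction rule: when the top-left (B) and top (C) coded-block flags agree,
 * the left flag (A) is used as the predictor, otherwise the top flag (C)
 * (layout as in the B C / A X diagram above). */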
  459. /**
  460. * Decode one AC coefficient
  461. * @param v The VC1 context
  462. * @param last Last coefficient
  463. * @param skip How many zero coefficients to skip
  464. * @param value Decoded AC coefficient value
  465. * @param codingset set of VLC to decode data
  466. * @see 8.1.3.4
  467. */
  468. static int vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
  469. int *value, int codingset)
  470. {
  471. GetBitContext *gb = &v->s.gb;
  472. int index, run, level, lst, sign;
  473. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  474. if (index < 0)
  475. return index;
  476. if (index != ff_vc1_ac_sizes[codingset] - 1) {
  477. run = vc1_index_decode_table[codingset][index][0];
  478. level = vc1_index_decode_table[codingset][index][1];
  479. lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
  480. sign = get_bits1(gb);
  481. } else {
  482. int escape = decode210(gb);
  483. if (escape != 2) {
  484. index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
  485. if (index >= ff_vc1_ac_sizes[codingset] - 1U)
  486. return AVERROR_INVALIDDATA;
  487. run = vc1_index_decode_table[codingset][index][0];
  488. level = vc1_index_decode_table[codingset][index][1];
  489. lst = index >= vc1_last_decode_table[codingset];
  490. if (escape == 0) {
  491. if (lst)
  492. level += vc1_last_delta_level_table[codingset][run];
  493. else
  494. level += vc1_delta_level_table[codingset][run];
  495. } else {
  496. if (lst)
  497. run += vc1_last_delta_run_table[codingset][level] + 1;
  498. else
  499. run += vc1_delta_run_table[codingset][level] + 1;
  500. }
  501. sign = get_bits1(gb);
  502. } else {
  503. lst = get_bits1(gb);
  504. if (v->s.esc3_level_length == 0) {
  505. if (v->pq < 8 || v->dquantfrm) { // table 59
  506. v->s.esc3_level_length = get_bits(gb, 3);
  507. if (!v->s.esc3_level_length)
  508. v->s.esc3_level_length = get_bits(gb, 2) + 8;
  509. } else { // table 60
  510. v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
  511. }
  512. v->s.esc3_run_length = 3 + get_bits(gb, 2);
  513. }
  514. run = get_bits(gb, v->s.esc3_run_length);
  515. sign = get_bits1(gb);
  516. level = get_bits(gb, v->s.esc3_level_length);
  517. }
  518. }
  519. *last = lst;
  520. *skip = run;
  521. *value = (level ^ -sign) + sign;
  522. return 0;
  523. }
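/* Note on the return path: *value = (level ^ -sign) + sign negates `level`
 * when the sign bit read from the bitstream is set and leaves it unchanged
 * otherwise; e.g. level = 3, sign = 1 gives (3 ^ -1) + 1 = -3. */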
  524. /** Decode intra block in intra frames - should be faster than decode_intra_block
  525. * @param v VC1Context
  526. * @param block block to decode
  527. * @param[in] n subblock index
  528. * @param coded are AC coeffs present or not
  529. * @param codingset set of VLC to decode data
  530. */
  531. static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
  532. int coded, int codingset)
  533. {
  534. GetBitContext *gb = &v->s.gb;
  535. MpegEncContext *s = &v->s;
  536. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  537. int i;
  538. int16_t *dc_val;
  539. int16_t *ac_val, *ac_val2;
  540. int dcdiff, scale;
  541. /* Get DC differential */
  542. if (n < 4) {
  543. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  544. } else {
  545. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  546. }
  547. if (dcdiff) {
  548. const int m = (v->pq == 1 || v->pq == 2) ? 3 - v->pq : 0;
  549. if (dcdiff == 119 /* ESC index value */) {
  550. dcdiff = get_bits(gb, 8 + m);
  551. } else {
  552. if (m)
  553. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  554. }
  555. if (get_bits1(gb))
  556. dcdiff = -dcdiff;
  557. }
  558. /* Prediction */
  559. dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
  560. *dc_val = dcdiff;
  561. /* Store the quantized DC coeff, used for prediction */
  562. if (n < 4)
  563. scale = s->y_dc_scale;
  564. else
  565. scale = s->c_dc_scale;
  566. block[0] = dcdiff * scale;
  567. ac_val = s->ac_val[0][s->block_index[n]];
  568. ac_val2 = ac_val;
  569. if (dc_pred_dir) // left
  570. ac_val -= 16;
  571. else // top
  572. ac_val -= 16 * s->block_wrap[n];
  573. scale = v->pq * 2 + v->halfpq;
  574. //AC Decoding
  575. i = !!coded;
  576. if (coded) {
  577. int last = 0, skip, value;
  578. const uint8_t *zz_table;
  579. int k;
  580. if (v->s.ac_pred) {
  581. if (!dc_pred_dir)
  582. zz_table = v->zz_8x8[2];
  583. else
  584. zz_table = v->zz_8x8[3];
  585. } else
  586. zz_table = v->zz_8x8[1];
  587. while (!last) {
  588. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  589. if (ret < 0)
  590. return ret;
  591. i += skip;
  592. if (i > 63)
  593. break;
  594. block[zz_table[i++]] = value;
  595. }
  596. /* apply AC prediction if needed */
  597. if (s->ac_pred) {
  598. int sh;
  599. if (dc_pred_dir) { // left
  600. sh = v->left_blk_sh;
  601. } else { // top
  602. sh = v->top_blk_sh;
  603. ac_val += 8;
  604. }
  605. for (k = 1; k < 8; k++)
  606. block[k << sh] += ac_val[k];
  607. }
  608. /* save AC coeffs for further prediction */
  609. for (k = 1; k < 8; k++) {
  610. ac_val2[k] = block[k << v->left_blk_sh];
  611. ac_val2[k + 8] = block[k << v->top_blk_sh];
  612. }
  613. /* scale AC coeffs */
  614. for (k = 1; k < 64; k++)
  615. if (block[k]) {
  616. block[k] *= scale;
  617. if (!v->pquantizer)
  618. block[k] += (block[k] < 0) ? -v->pq : v->pq;
  619. }
  620. } else {
  621. int k;
  622. memset(ac_val2, 0, 16 * 2);
  623. /* apply AC prediction if needed */
  624. if (s->ac_pred) {
  625. int sh;
  626. if (dc_pred_dir) { //left
  627. sh = v->left_blk_sh;
  628. } else { // top
  629. sh = v->top_blk_sh;
  630. ac_val += 8;
  631. ac_val2 += 8;
  632. }
  633. memcpy(ac_val2, ac_val, 8 * 2);
  634. for (k = 1; k < 8; k++) {
  635. block[k << sh] = ac_val[k] * scale;
  636. if (!v->pquantizer && block[k << sh])
  637. block[k << sh] += (block[k << sh] < 0) ? -v->pq : v->pq;
  638. }
  639. }
  640. }
  641. if (s->ac_pred) i = 63;
  642. s->block_last_index[n] = i;
  643. return 0;
  644. }
  645. * Decode intra block in intra frames (advanced profile) - should be faster than decode_intra_block
  646. * @param v VC1Context
  647. * @param block block to decode
  648. * @param[in] n subblock number
  649. * @param coded are AC coeffs present or not
  650. * @param codingset set of VLC to decode data
  651. * @param mquant quantizer value for this macroblock
  652. */
  653. static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
  654. int coded, int codingset, int mquant)
  655. {
  656. GetBitContext *gb = &v->s.gb;
  657. MpegEncContext *s = &v->s;
  658. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  659. int i;
  660. int16_t *dc_val = NULL;
  661. int16_t *ac_val, *ac_val2;
  662. int dcdiff;
  663. int a_avail = v->a_avail, c_avail = v->c_avail;
  664. int use_pred = s->ac_pred;
  665. int scale;
  666. int q1, q2 = 0;
  667. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  668. int quant = FFABS(mquant);
  669. /* Get DC differential */
  670. if (n < 4) {
  671. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  672. } else {
  673. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  674. }
  675. if (dcdiff) {
  676. const int m = (quant == 1 || quant == 2) ? 3 - quant : 0;
  677. if (dcdiff == 119 /* ESC index value */) {
  678. dcdiff = get_bits(gb, 8 + m);
  679. } else {
  680. if (m)
  681. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  682. }
  683. if (get_bits1(gb))
  684. dcdiff = -dcdiff;
  685. }
  686. /* Prediction */
  687. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, quant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
  688. *dc_val = dcdiff;
  689. /* Store the quantized DC coeff, used for prediction */
  690. if (n < 4)
  691. scale = s->y_dc_scale;
  692. else
  693. scale = s->c_dc_scale;
  694. block[0] = dcdiff * scale;
  695. /* check if AC is needed at all */
  696. if (!a_avail && !c_avail)
  697. use_pred = 0;
  698. scale = quant * 2 + ((mquant < 0) ? 0 : v->halfpq);
  699. ac_val = s->ac_val[0][s->block_index[n]];
  700. ac_val2 = ac_val;
  701. if (dc_pred_dir) // left
  702. ac_val -= 16;
  703. else // top
  704. ac_val -= 16 * s->block_wrap[n];
  705. q1 = s->current_picture.qscale_table[mb_pos];
  706. if (n == 3)
  707. q2 = q1;
  708. else if (dc_pred_dir) {
  709. if (n == 1)
  710. q2 = q1;
  711. else if (c_avail && mb_pos)
  712. q2 = s->current_picture.qscale_table[mb_pos - 1];
  713. } else {
  714. if (n == 2)
  715. q2 = q1;
  716. else if (a_avail && mb_pos >= s->mb_stride)
  717. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  718. }
  719. //AC Decoding
  720. i = 1;
  721. if (coded) {
  722. int last = 0, skip, value;
  723. const uint8_t *zz_table;
  724. int k;
  725. if (v->s.ac_pred) {
  726. if (!use_pred && v->fcm == ILACE_FRAME) {
  727. zz_table = v->zzi_8x8;
  728. } else {
  729. if (!dc_pred_dir) // top
  730. zz_table = v->zz_8x8[2];
  731. else // left
  732. zz_table = v->zz_8x8[3];
  733. }
  734. } else {
  735. if (v->fcm != ILACE_FRAME)
  736. zz_table = v->zz_8x8[1];
  737. else
  738. zz_table = v->zzi_8x8;
  739. }
  740. while (!last) {
  741. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  742. if (ret < 0)
  743. return ret;
  744. i += skip;
  745. if (i > 63)
  746. break;
  747. block[zz_table[i++]] = value;
  748. }
  749. /* apply AC prediction if needed */
  750. if (use_pred) {
  751. int sh;
  752. if (dc_pred_dir) { // left
  753. sh = v->left_blk_sh;
  754. } else { // top
  755. sh = v->top_blk_sh;
  756. ac_val += 8;
  757. }
  758. /* scale predictors if needed*/
  759. q1 = FFABS(q1) * 2 + ((q1 < 0) ? 0 : v->halfpq) - 1;
  760. if (q1 < 1)
  761. return AVERROR_INVALIDDATA;
  762. if (q2)
  763. q2 = FFABS(q2) * 2 + ((q2 < 0) ? 0 : v->halfpq) - 1;
  764. if (q2 && q1 != q2) {
  765. for (k = 1; k < 8; k++)
  766. block[k << sh] += (int)(ac_val[k] * (unsigned)q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  767. } else {
  768. for (k = 1; k < 8; k++)
  769. block[k << sh] += ac_val[k];
  770. }
  771. }
  772. /* save AC coeffs for further prediction */
  773. for (k = 1; k < 8; k++) {
  774. ac_val2[k ] = block[k << v->left_blk_sh];
  775. ac_val2[k + 8] = block[k << v->top_blk_sh];
  776. }
  777. /* scale AC coeffs */
  778. for (k = 1; k < 64; k++)
  779. if (block[k]) {
  780. block[k] *= scale;
  781. if (!v->pquantizer)
  782. block[k] += (block[k] < 0) ? -quant : quant;
  783. }
  784. } else { // no AC coeffs
  785. int k;
  786. memset(ac_val2, 0, 16 * 2);
  787. /* apply AC prediction if needed */
  788. if (use_pred) {
  789. int sh;
  790. if (dc_pred_dir) { // left
  791. sh = v->left_blk_sh;
  792. } else { // top
  793. sh = v->top_blk_sh;
  794. ac_val += 8;
  795. ac_val2 += 8;
  796. }
  797. memcpy(ac_val2, ac_val, 8 * 2);
  798. q1 = FFABS(q1) * 2 + ((q1 < 0) ? 0 : v->halfpq) - 1;
  799. if (q1 < 1)
  800. return AVERROR_INVALIDDATA;
  801. if (q2)
  802. q2 = FFABS(q2) * 2 + ((q2 < 0) ? 0 : v->halfpq) - 1;
  803. if (q2 && q1 != q2) {
  804. for (k = 1; k < 8; k++)
  805. ac_val2[k] = (int)(ac_val2[k] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  806. }
  807. for (k = 1; k < 8; k++) {
  808. block[k << sh] = ac_val2[k] * scale;
  809. if (!v->pquantizer && block[k << sh])
  810. block[k << sh] += (block[k << sh] < 0) ? -quant : quant;
  811. }
  812. }
  813. }
  814. if (use_pred) i = 63;
  815. s->block_last_index[n] = i;
  816. return 0;
  817. }
  818. /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
  819. * @param v VC1Context
  820. * @param block block to decode
  821. * @param[in] n subblock index
  822. * @param coded are AC coeffs present or not
  823. * @param mquant block quantizer
  824. * @param codingset set of VLC to decode data
  825. */
  826. static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
  827. int coded, int mquant, int codingset)
  828. {
  829. GetBitContext *gb = &v->s.gb;
  830. MpegEncContext *s = &v->s;
  831. int dc_pred_dir = 0; /* Direction of the DC prediction used */
  832. int i;
  833. int16_t *dc_val = NULL;
  834. int16_t *ac_val, *ac_val2;
  835. int dcdiff;
  836. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  837. int a_avail = v->a_avail, c_avail = v->c_avail;
  838. int use_pred = s->ac_pred;
  839. int scale;
  840. int q1, q2 = 0;
  841. int quant = FFABS(mquant);
  842. s->bdsp.clear_block(block);
  843. /* XXX: Guard against dumb values of mquant */
  844. quant = av_clip_uintp2(quant, 5);
  845. /* Set DC scale - y and c use the same */
  846. s->y_dc_scale = s->y_dc_scale_table[quant];
  847. s->c_dc_scale = s->c_dc_scale_table[quant];
  848. /* Get DC differential */
  849. if (n < 4) {
  850. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  851. } else {
  852. dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
  853. }
  854. if (dcdiff) {
  855. const int m = (quant == 1 || quant == 2) ? 3 - quant : 0;
  856. if (dcdiff == 119 /* ESC index value */) {
  857. dcdiff = get_bits(gb, 8 + m);
  858. } else {
  859. if (m)
  860. dcdiff = (dcdiff << m) + get_bits(gb, m) - ((1 << m) - 1);
  861. }
  862. if (get_bits1(gb))
  863. dcdiff = -dcdiff;
  864. }
  865. /* Prediction */
  866. dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, quant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
  867. *dc_val = dcdiff;
  868. /* Store the quantized DC coeff, used for prediction */
  869. if (n < 4) {
  870. block[0] = dcdiff * s->y_dc_scale;
  871. } else {
  872. block[0] = dcdiff * s->c_dc_scale;
  873. }
  874. //AC Decoding
  875. i = 1;
  876. /* check if AC is needed at all and adjust direction if needed */
  877. if (!a_avail) dc_pred_dir = 1;
  878. if (!c_avail) dc_pred_dir = 0;
  879. if (!a_avail && !c_avail) use_pred = 0;
  880. ac_val = s->ac_val[0][s->block_index[n]];
  881. ac_val2 = ac_val;
  882. scale = quant * 2 + ((mquant < 0) ? 0 : v->halfpq);
  883. if (dc_pred_dir) //left
  884. ac_val -= 16;
  885. else //top
  886. ac_val -= 16 * s->block_wrap[n];
  887. q1 = s->current_picture.qscale_table[mb_pos];
  888. if (dc_pred_dir && c_avail && mb_pos)
  889. q2 = s->current_picture.qscale_table[mb_pos - 1];
  890. if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
  891. q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
  892. if (dc_pred_dir && n == 1)
  893. q2 = q1;
  894. if (!dc_pred_dir && n == 2)
  895. q2 = q1;
  896. if (n == 3) q2 = q1;
  897. if (coded) {
  898. int last = 0, skip, value;
  899. int k;
  900. while (!last) {
  901. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
  902. if (ret < 0)
  903. return ret;
  904. i += skip;
  905. if (i > 63)
  906. break;
  907. if (v->fcm == PROGRESSIVE)
  908. block[v->zz_8x8[0][i++]] = value;
  909. else {
  910. if (use_pred && (v->fcm == ILACE_FRAME)) {
  911. if (!dc_pred_dir) // top
  912. block[v->zz_8x8[2][i++]] = value;
  913. else // left
  914. block[v->zz_8x8[3][i++]] = value;
  915. } else {
  916. block[v->zzi_8x8[i++]] = value;
  917. }
  918. }
  919. }
  920. /* apply AC prediction if needed */
  921. if (use_pred) {
  922. /* scale predictors if needed*/
  923. q1 = FFABS(q1) * 2 + ((q1 < 0) ? 0 : v->halfpq) - 1;
  924. if (q1 < 1)
  925. return AVERROR_INVALIDDATA;
  926. if (q2)
  927. q2 = FFABS(q2) * 2 + ((q2 < 0) ? 0 : v->halfpq) - 1;
  928. if (q2 && q1 != q2) {
  929. if (dc_pred_dir) { // left
  930. for (k = 1; k < 8; k++)
  931. block[k << v->left_blk_sh] += (int)(ac_val[k] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  932. } else { //top
  933. for (k = 1; k < 8; k++)
  934. block[k << v->top_blk_sh] += (int)(ac_val[k + 8] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  935. }
  936. } else {
  937. if (dc_pred_dir) { // left
  938. for (k = 1; k < 8; k++)
  939. block[k << v->left_blk_sh] += ac_val[k];
  940. } else { // top
  941. for (k = 1; k < 8; k++)
  942. block[k << v->top_blk_sh] += ac_val[k + 8];
  943. }
  944. }
  945. }
  946. /* save AC coeffs for further prediction */
  947. for (k = 1; k < 8; k++) {
  948. ac_val2[k ] = block[k << v->left_blk_sh];
  949. ac_val2[k + 8] = block[k << v->top_blk_sh];
  950. }
  951. /* scale AC coeffs */
  952. for (k = 1; k < 64; k++)
  953. if (block[k]) {
  954. block[k] *= scale;
  955. if (!v->pquantizer)
  956. block[k] += (block[k] < 0) ? -quant : quant;
  957. }
  958. if (use_pred) i = 63;
  959. } else { // no AC coeffs
  960. int k;
  961. memset(ac_val2, 0, 16 * 2);
  962. if (dc_pred_dir) { // left
  963. if (use_pred) {
  964. memcpy(ac_val2, ac_val, 8 * 2);
  965. q1 = FFABS(q1) * 2 + ((q1 < 0) ? 0 : v->halfpq) - 1;
  966. if (q1 < 1)
  967. return AVERROR_INVALIDDATA;
  968. if (q2)
  969. q2 = FFABS(q2) * 2 + ((q2 < 0) ? 0 : v->halfpq) - 1;
  970. if (q2 && q1 != q2) {
  971. for (k = 1; k < 8; k++)
  972. ac_val2[k] = (int)(ac_val2[k] * (unsigned)q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  973. }
  974. }
  975. } else { // top
  976. if (use_pred) {
  977. memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
  978. q1 = FFABS(q1) * 2 + ((q1 < 0) ? 0 : v->halfpq) - 1;
  979. if (q1 < 1)
  980. return AVERROR_INVALIDDATA;
  981. if (q2)
  982. q2 = FFABS(q2) * 2 + ((q2 < 0) ? 0 : v->halfpq) - 1;
  983. if (q2 && q1 != q2) {
  984. for (k = 1; k < 8; k++)
  985. ac_val2[k + 8] = (int)(ac_val2[k + 8] * (unsigned)q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
  986. }
  987. }
  988. }
  989. /* apply AC prediction if needed */
  990. if (use_pred) {
  991. if (dc_pred_dir) { // left
  992. for (k = 1; k < 8; k++) {
  993. block[k << v->left_blk_sh] = ac_val2[k] * scale;
  994. if (!v->pquantizer && block[k << v->left_blk_sh])
  995. block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -quant : quant;
  996. }
  997. } else { // top
  998. for (k = 1; k < 8; k++) {
  999. block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
  1000. if (!v->pquantizer && block[k << v->top_blk_sh])
  1001. block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -quant : quant;
  1002. }
  1003. }
  1004. i = 63;
  1005. }
  1006. }
  1007. s->block_last_index[n] = i;
  1008. return 0;
  1009. }
  1010. /** Decode P block
  1011. */
  1012. static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
  1013. int mquant, int ttmb, int first_block,
  1014. uint8_t *dst, int linesize, int skip_block,
  1015. int *ttmb_out)
  1016. {
  1017. MpegEncContext *s = &v->s;
  1018. GetBitContext *gb = &s->gb;
  1019. int i, j;
  1020. int subblkpat = 0;
  1021. int scale, off, idx, last, skip, value;
  1022. int ttblk = ttmb & 7;
  1023. int pat = 0;
  1024. int quant = FFABS(mquant);
  1025. s->bdsp.clear_block(block);
  1026. if (ttmb == -1) {
  1027. ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
  1028. }
  1029. if (ttblk == TT_4X4) {
  1030. subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
  1031. }
  1032. if ((ttblk != TT_8X8 && ttblk != TT_4X4)
  1033. && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
  1034. || (!v->res_rtm_flag && !first_block))) {
  1035. subblkpat = decode012(gb);
  1036. if (subblkpat)
  1037. subblkpat ^= 3; // swap decoded pattern bits
  1038. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
  1039. ttblk = TT_8X4;
  1040. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
  1041. ttblk = TT_4X8;
  1042. }
  1043. scale = quant * 2 + ((mquant < 0) ? 0 : v->halfpq);
  1044. // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
  1045. if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
  1046. subblkpat = 2 - (ttblk == TT_8X4_TOP);
  1047. ttblk = TT_8X4;
  1048. }
  1049. if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
  1050. subblkpat = 2 - (ttblk == TT_4X8_LEFT);
  1051. ttblk = TT_4X8;
  1052. }
  1053. switch (ttblk) {
  1054. case TT_8X8:
  1055. pat = 0xF;
  1056. i = 0;
  1057. last = 0;
  1058. while (!last) {
  1059. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1060. if (ret < 0)
  1061. return ret;
  1062. i += skip;
  1063. if (i > 63)
  1064. break;
  1065. if (!v->fcm)
  1066. idx = v->zz_8x8[0][i++];
  1067. else
  1068. idx = v->zzi_8x8[i++];
  1069. block[idx] = value * scale;
  1070. if (!v->pquantizer)
  1071. block[idx] += (block[idx] < 0) ? -quant : quant;
  1072. }
  1073. if (!skip_block) {
  1074. if (i == 1)
  1075. v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
  1076. else {
  1077. v->vc1dsp.vc1_inv_trans_8x8(block);
  1078. s->idsp.add_pixels_clamped(block, dst, linesize);
  1079. }
  1080. }
  1081. break;
  1082. case TT_4X4:
  1083. pat = ~subblkpat & 0xF;
  1084. for (j = 0; j < 4; j++) {
  1085. last = subblkpat & (1 << (3 - j));
  1086. i = 0;
  1087. off = (j & 1) * 4 + (j & 2) * 16;
  1088. while (!last) {
  1089. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1090. if (ret < 0)
  1091. return ret;
  1092. i += skip;
  1093. if (i > 15)
  1094. break;
  1095. if (!v->fcm)
  1096. idx = ff_vc1_simple_progressive_4x4_zz[i++];
  1097. else
  1098. idx = ff_vc1_adv_interlaced_4x4_zz[i++];
  1099. block[idx + off] = value * scale;
  1100. if (!v->pquantizer)
  1101. block[idx + off] += (block[idx + off] < 0) ? -quant : quant;
  1102. }
  1103. if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
  1104. if (i == 1)
  1105. v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1106. else
  1107. v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
  1108. }
  1109. }
  1110. break;
  1111. case TT_8X4:
  1112. pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
  1113. for (j = 0; j < 2; j++) {
  1114. last = subblkpat & (1 << (1 - j));
  1115. i = 0;
  1116. off = j * 32;
  1117. while (!last) {
  1118. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1119. if (ret < 0)
  1120. return ret;
  1121. i += skip;
  1122. if (i > 31)
  1123. break;
  1124. if (!v->fcm)
  1125. idx = v->zz_8x4[i++] + off;
  1126. else
  1127. idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
  1128. block[idx] = value * scale;
  1129. if (!v->pquantizer)
  1130. block[idx] += (block[idx] < 0) ? -quant : quant;
  1131. }
  1132. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1133. if (i == 1)
  1134. v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
  1135. else
  1136. v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
  1137. }
  1138. }
  1139. break;
  1140. case TT_4X8:
  1141. pat = ~(subblkpat * 5) & 0xF;
  1142. for (j = 0; j < 2; j++) {
  1143. last = subblkpat & (1 << (1 - j));
  1144. i = 0;
  1145. off = j * 4;
  1146. while (!last) {
  1147. int ret = vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
  1148. if (ret < 0)
  1149. return ret;
  1150. i += skip;
  1151. if (i > 31)
  1152. break;
  1153. if (!v->fcm)
  1154. idx = v->zz_4x8[i++] + off;
  1155. else
  1156. idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
  1157. block[idx] = value * scale;
  1158. if (!v->pquantizer)
  1159. block[idx] += (block[idx] < 0) ? -quant : quant;
  1160. }
  1161. if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
  1162. if (i == 1)
  1163. v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
  1164. else
  1165. v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
  1166. }
  1167. }
  1168. break;
  1169. }
  1170. if (ttmb_out)
  1171. *ttmb_out |= ttblk << (n * 4);
  1172. return pat;
  1173. }
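/* On success the return value is a 4-bit pattern of coded subblocks for this
 * block; callers accumulate it via `block_cbp |= pat << (i << 2)`, and a
 * negative return propagates a decode error. */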
  1174. /** @} */ // Macroblock group
  1175. static const uint8_t size_table[6] = { 0, 2, 3, 4, 5, 8 };
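/* size_table[index1] is the number of extra magnitude bits GET_MVDATA() reads
 * for each index bucket (one less for the last bucket in half-pel mode). */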
  1176. /** Decode one P-frame MB
  1177. */
  1178. static int vc1_decode_p_mb(VC1Context *v)
  1179. {
  1180. MpegEncContext *s = &v->s;
  1181. GetBitContext *gb = &s->gb;
  1182. int i, j;
  1183. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1184. int cbp; /* cbp decoding stuff */
  1185. int mqdiff, mquant; /* MB quantization */
  1186. int ttmb = v->ttfrm; /* MB Transform type */
  1187. int mb_has_coeffs = 1; /* last_flag */
  1188. int dmv_x, dmv_y; /* Differential MV components */
  1189. int index, index1; /* LUT indexes */
  1190. int val, sign; /* temp values */
  1191. int first_block = 1;
  1192. int dst_idx, off;
  1193. int skipped, fourmv;
  1194. int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
  1195. mquant = v->pq; /* lossy initialization */
  1196. if (v->mv_type_is_raw)
  1197. fourmv = get_bits1(gb);
  1198. else
  1199. fourmv = v->mv_type_mb_plane[mb_pos];
  1200. if (v->skip_is_raw)
  1201. skipped = get_bits1(gb);
  1202. else
  1203. skipped = v->s.mbskip_table[mb_pos];
  1204. if (!fourmv) { /* 1MV mode */
  1205. if (!skipped) {
  1206. GET_MVDATA(dmv_x, dmv_y);
  1207. if (s->mb_intra) {
  1208. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1209. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1210. }
  1211. s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
  1212. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1213. /* FIXME Set DC val for inter block ? */
  1214. if (s->mb_intra && !mb_has_coeffs) {
  1215. GET_MQUANT();
  1216. s->ac_pred = get_bits1(gb);
  1217. cbp = 0;
  1218. } else if (mb_has_coeffs) {
  1219. if (s->mb_intra)
  1220. s->ac_pred = get_bits1(gb);
  1221. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1222. GET_MQUANT();
  1223. } else {
  1224. mquant = v->pq;
  1225. cbp = 0;
  1226. }
  1227. s->current_picture.qscale_table[mb_pos] = mquant;
  1228. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1229. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
  1230. VC1_TTMB_VLC_BITS, 2);
  1231. if (!s->mb_intra) ff_vc1_mc_1mv(v, 0);
  1232. dst_idx = 0;
  1233. for (i = 0; i < 6; i++) {
  1234. s->dc_val[0][s->block_index[i]] = 0;
  1235. dst_idx += i >> 2;
  1236. val = ((cbp >> (5 - i)) & 1);
  1237. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1238. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1239. if (s->mb_intra) {
  1240. /* check if prediction blocks A and C are available */
  1241. v->a_avail = v->c_avail = 0;
  1242. if (i == 2 || i == 3 || !s->first_slice_line)
  1243. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1244. if (i == 1 || i == 3 || s->mb_x)
  1245. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1246. vc1_decode_intra_block(v, v->block[v->cur_blk_idx][block_map[i]], i, val, mquant,
  1247. (i & 4) ? v->codingset2 : v->codingset);
  1248. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1249. continue;
  1250. v->vc1dsp.vc1_inv_trans_8x8(v->block[v->cur_blk_idx][block_map[i]]);
  1251. if (v->rangeredfrm)
  1252. for (j = 0; j < 64; j++)
  1253. v->block[v->cur_blk_idx][block_map[i]][j] *= 2;
  1254. block_cbp |= 0xF << (i << 2);
  1255. block_intra |= 1 << i;
  1256. } else if (val) {
  1257. pat = vc1_decode_p_block(v, v->block[v->cur_blk_idx][block_map[i]], i, mquant, ttmb, first_block,
  1258. s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
  1259. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1260. if (pat < 0)
  1261. return pat;
  1262. block_cbp |= pat << (i << 2);
  1263. if (!v->ttmbf && ttmb < 8)
  1264. ttmb = -1;
  1265. first_block = 0;
  1266. }
  1267. }
  1268. } else { // skipped
  1269. s->mb_intra = 0;
  1270. for (i = 0; i < 6; i++) {
  1271. v->mb_type[0][s->block_index[i]] = 0;
  1272. s->dc_val[0][s->block_index[i]] = 0;
  1273. }
  1274. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1275. s->current_picture.qscale_table[mb_pos] = 0;
  1276. ff_vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1277. ff_vc1_mc_1mv(v, 0);
  1278. }
  1279. } else { // 4MV mode
  1280. if (!skipped /* unskipped MB */) {
  1281. int intra_count = 0, coded_inter = 0;
  1282. int is_intra[6], is_coded[6];
  1283. /* Get CBPCY */
  1284. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1285. for (i = 0; i < 6; i++) {
  1286. val = ((cbp >> (5 - i)) & 1);
  1287. s->dc_val[0][s->block_index[i]] = 0;
  1288. s->mb_intra = 0;
  1289. if (i < 4) {
  1290. dmv_x = dmv_y = 0;
  1291. s->mb_intra = 0;
  1292. mb_has_coeffs = 0;
  1293. if (val) {
  1294. GET_MVDATA(dmv_x, dmv_y);
  1295. }
  1296. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1297. if (!s->mb_intra)
  1298. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1299. intra_count += s->mb_intra;
  1300. is_intra[i] = s->mb_intra;
  1301. is_coded[i] = mb_has_coeffs;
  1302. }
  1303. if (i & 4) {
  1304. is_intra[i] = (intra_count >= 3);
  1305. is_coded[i] = val;
  1306. }
  1307. if (i == 4)
  1308. ff_vc1_mc_4mv_chroma(v, 0);
  1309. v->mb_type[0][s->block_index[i]] = is_intra[i];
  1310. if (!coded_inter)
  1311. coded_inter = !is_intra[i] & is_coded[i];
  1312. }
  1313. // if there are no coded blocks then don't do anything more
  1314. dst_idx = 0;
  1315. if (!intra_count && !coded_inter)
  1316. goto end;
  1317. GET_MQUANT();
  1318. s->current_picture.qscale_table[mb_pos] = mquant;
  1319. /* test if block is intra and has pred */
  1320. {
  1321. int intrapred = 0;
  1322. for (i = 0; i < 6; i++)
  1323. if (is_intra[i]) {
  1324. if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
  1325. || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
  1326. intrapred = 1;
  1327. break;
  1328. }
  1329. }
  1330. if (intrapred)
  1331. s->ac_pred = get_bits1(gb);
  1332. else
  1333. s->ac_pred = 0;
  1334. }
  1335. if (!v->ttmbf && coded_inter)
  1336. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1337. for (i = 0; i < 6; i++) {
  1338. dst_idx += i >> 2;
  1339. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1340. s->mb_intra = is_intra[i];
  1341. if (is_intra[i]) {
  1342. /* check if prediction blocks A and C are available */
  1343. v->a_avail = v->c_avail = 0;
  1344. if (i == 2 || i == 3 || !s->first_slice_line)
  1345. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1346. if (i == 1 || i == 3 || s->mb_x)
  1347. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1348. vc1_decode_intra_block(v, v->block[v->cur_blk_idx][block_map[i]], i, is_coded[i], mquant,
  1349. (i & 4) ? v->codingset2 : v->codingset);
  1350. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1351. continue;
  1352. v->vc1dsp.vc1_inv_trans_8x8(v->block[v->cur_blk_idx][block_map[i]]);
  1353. if (v->rangeredfrm)
  1354. for (j = 0; j < 64; j++)
  1355. v->block[v->cur_blk_idx][block_map[i]][j] *= 2;
  1356. block_cbp |= 0xF << (i << 2);
  1357. block_intra |= 1 << i;
  1358. } else if (is_coded[i]) {
  1359. pat = vc1_decode_p_block(v, v->block[v->cur_blk_idx][block_map[i]], i, mquant, ttmb,
  1360. first_block, s->dest[dst_idx] + off,
  1361. (i & 4) ? s->uvlinesize : s->linesize,
  1362. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1363. &block_tt);
  1364. if (pat < 0)
  1365. return pat;
  1366. block_cbp |= pat << (i << 2);
  1367. if (!v->ttmbf && ttmb < 8)
  1368. ttmb = -1;
  1369. first_block = 0;
  1370. }
  1371. }
  1372. } else { // skipped MB
  1373. s->mb_intra = 0;
  1374. s->current_picture.qscale_table[mb_pos] = 0;
  1375. for (i = 0; i < 6; i++) {
  1376. v->mb_type[0][s->block_index[i]] = 0;
  1377. s->dc_val[0][s->block_index[i]] = 0;
  1378. }
  1379. for (i = 0; i < 4; i++) {
  1380. ff_vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  1381. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1382. }
  1383. ff_vc1_mc_4mv_chroma(v, 0);
  1384. s->current_picture.qscale_table[mb_pos] = 0;
  1385. }
  1386. }
  1387. end:
  1388. if (v->overlap && v->pq >= 9)
  1389. ff_vc1_p_overlap_filter(v);
  1390. vc1_put_blocks_clamped(v, 1);
  1391. v->cbp[s->mb_x] = block_cbp;
  1392. v->ttblk[s->mb_x] = block_tt;
  1393. v->is_intra[s->mb_x] = block_intra;
  1394. return 0;
  1395. }
  1396. /* Decode one macroblock in an interlaced frame p picture */
  1397. static int vc1_decode_p_mb_intfr(VC1Context *v)
  1398. {
  1399. MpegEncContext *s = &v->s;
  1400. GetBitContext *gb = &s->gb;
  1401. int i;
  1402. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1403. int cbp = 0; /* cbp decoding stuff */
  1404. int mqdiff, mquant; /* MB quantization */
  1405. int ttmb = v->ttfrm; /* MB Transform type */
  1406. int mb_has_coeffs = 1; /* last_flag */
  1407. int dmv_x, dmv_y; /* Differential MV components */
  1408. int val; /* temp value */
  1409. int first_block = 1;
  1410. int dst_idx, off;
  1411. int skipped, fourmv = 0, twomv = 0;
  1412. int block_cbp = 0, pat, block_tt = 0;
  1413. int idx_mbmode = 0, mvbp;
  1414. int fieldtx;
  1415. mquant = v->pq; /* Lossy initialization */
  1416. if (v->skip_is_raw)
  1417. skipped = get_bits1(gb);
  1418. else
  1419. skipped = v->s.mbskip_table[mb_pos];
  1420. if (!skipped) {
  1421. if (v->fourmvswitch)
  1422. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
  1423. else
  1424. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
  1425. switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
  1426. /* store the motion vector type in a flag (useful later) */
  1427. case MV_PMODE_INTFR_4MV:
  1428. fourmv = 1;
  1429. v->blk_mv_type[s->block_index[0]] = 0;
  1430. v->blk_mv_type[s->block_index[1]] = 0;
  1431. v->blk_mv_type[s->block_index[2]] = 0;
  1432. v->blk_mv_type[s->block_index[3]] = 0;
  1433. break;
  1434. case MV_PMODE_INTFR_4MV_FIELD:
  1435. fourmv = 1;
  1436. v->blk_mv_type[s->block_index[0]] = 1;
  1437. v->blk_mv_type[s->block_index[1]] = 1;
  1438. v->blk_mv_type[s->block_index[2]] = 1;
  1439. v->blk_mv_type[s->block_index[3]] = 1;
  1440. break;
  1441. case MV_PMODE_INTFR_2MV_FIELD:
  1442. twomv = 1;
  1443. v->blk_mv_type[s->block_index[0]] = 1;
  1444. v->blk_mv_type[s->block_index[1]] = 1;
  1445. v->blk_mv_type[s->block_index[2]] = 1;
  1446. v->blk_mv_type[s->block_index[3]] = 1;
  1447. break;
  1448. case MV_PMODE_INTFR_1MV:
  1449. v->blk_mv_type[s->block_index[0]] = 0;
  1450. v->blk_mv_type[s->block_index[1]] = 0;
  1451. v->blk_mv_type[s->block_index[2]] = 0;
  1452. v->blk_mv_type[s->block_index[3]] = 0;
  1453. break;
  1454. }
  1455. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  1456. for (i = 0; i < 4; i++) {
  1457. s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  1458. s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  1459. }
  1460. v->is_intra[s->mb_x] = 0x3f; // Set the bitfield to all 1s.
  1461. s->mb_intra = 1;
  1462. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  1463. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  1464. mb_has_coeffs = get_bits1(gb);
  1465. if (mb_has_coeffs)
  1466. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1467. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1468. GET_MQUANT();
  1469. s->current_picture.qscale_table[mb_pos] = mquant;
  1470. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1471. s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
  1472. s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
  1473. dst_idx = 0;
  1474. for (i = 0; i < 6; i++) {
  1475. v->a_avail = v->c_avail = 0;
  1476. v->mb_type[0][s->block_index[i]] = 1;
  1477. s->dc_val[0][s->block_index[i]] = 0;
  1478. dst_idx += i >> 2;
  1479. val = ((cbp >> (5 - i)) & 1);
  1480. if (i == 2 || i == 3 || !s->first_slice_line)
  1481. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1482. if (i == 1 || i == 3 || s->mb_x)
  1483. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1484. vc1_decode_intra_block(v, v->block[v->cur_blk_idx][block_map[i]], i, val, mquant,
  1485. (i & 4) ? v->codingset2 : v->codingset);
  1486. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1487. continue;
  1488. v->vc1dsp.vc1_inv_trans_8x8(v->block[v->cur_blk_idx][block_map[i]]);
  1489. if (i < 4)
  1490. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  1491. else
  1492. off = 0;
  1493. block_cbp |= 0xf << (i << 2);
  1494. }
  1495. } else { // inter MB
  1496. mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
  1497. if (mb_has_coeffs)
  1498. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1499. if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  1500. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  1501. } else {
  1502. if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
  1503. || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
  1504. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1505. }
  1506. }
  1507. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1508. for (i = 0; i < 6; i++)
  1509. v->mb_type[0][s->block_index[i]] = 0;
  1510. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
  1511. /* for all motion vector read MVDATA and motion compensate each block */
  1512. dst_idx = 0;
  1513. if (fourmv) {
  1514. mvbp = v->fourmvbp;
  1515. for (i = 0; i < 4; i++) {
  1516. dmv_x = dmv_y = 0;
  1517. if (mvbp & (8 >> i))
  1518. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1519. ff_vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
  1520. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1521. }
  1522. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1523. } else if (twomv) {
  1524. mvbp = v->twomvbp;
  1525. dmv_x = dmv_y = 0;
  1526. if (mvbp & 2) {
  1527. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1528. }
  1529. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1530. ff_vc1_mc_4mv_luma(v, 0, 0, 0);
  1531. ff_vc1_mc_4mv_luma(v, 1, 0, 0);
  1532. dmv_x = dmv_y = 0;
  1533. if (mvbp & 1) {
  1534. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1535. }
  1536. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
  1537. ff_vc1_mc_4mv_luma(v, 2, 0, 0);
  1538. ff_vc1_mc_4mv_luma(v, 3, 0, 0);
  1539. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  1540. } else {
  1541. mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
  1542. dmv_x = dmv_y = 0;
  1543. if (mvbp) {
  1544. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  1545. }
  1546. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1547. ff_vc1_mc_1mv(v, 0);
  1548. }
  1549. if (cbp)
  1550. GET_MQUANT(); // p. 227
  1551. s->current_picture.qscale_table[mb_pos] = mquant;
  1552. if (!v->ttmbf && cbp)
  1553. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1554. for (i = 0; i < 6; i++) {
  1555. s->dc_val[0][s->block_index[i]] = 0;
  1556. dst_idx += i >> 2;
  1557. val = ((cbp >> (5 - i)) & 1);
  1558. if (!fieldtx)
  1559. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1560. else
  1561. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  1562. if (val) {
  1563. pat = vc1_decode_p_block(v, v->block[v->cur_blk_idx][block_map[i]], i, mquant, ttmb,
  1564. first_block, s->dest[dst_idx] + off,
  1565. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  1566. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  1567. if (pat < 0)
  1568. return pat;
  1569. block_cbp |= pat << (i << 2);
  1570. if (!v->ttmbf && ttmb < 8)
  1571. ttmb = -1;
  1572. first_block = 0;
  1573. }
  1574. }
  1575. }
  1576. } else { // skipped
  1577. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1578. for (i = 0; i < 6; i++) {
  1579. v->mb_type[0][s->block_index[i]] = 0;
  1580. s->dc_val[0][s->block_index[i]] = 0;
  1581. }
  1582. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  1583. s->current_picture.qscale_table[mb_pos] = 0;
  1584. v->blk_mv_type[s->block_index[0]] = 0;
  1585. v->blk_mv_type[s->block_index[1]] = 0;
  1586. v->blk_mv_type[s->block_index[2]] = 0;
  1587. v->blk_mv_type[s->block_index[3]] = 0;
  1588. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  1589. ff_vc1_mc_1mv(v, 0);
  1590. v->fieldtx_plane[mb_pos] = 0;
  1591. }
  1592. if (v->overlap && v->pq >= 9)
  1593. ff_vc1_p_overlap_filter(v);
  1594. vc1_put_blocks_clamped(v, 1);
  1595. v->cbp[s->mb_x] = block_cbp;
  1596. v->ttblk[s->mb_x] = block_tt;
  1597. return 0;
  1598. }
  1599. static int vc1_decode_p_mb_intfi(VC1Context *v)
  1600. {
  1601. MpegEncContext *s = &v->s;
  1602. GetBitContext *gb = &s->gb;
  1603. int i;
  1604. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1605. int cbp = 0; /* coded block pattern */
  1606. int mqdiff, mquant; /* MB quantization */
  1607. int ttmb = v->ttfrm; /* MB Transform type */
  1608. int mb_has_coeffs = 1; /* last_flag */
  1609. int dmv_x, dmv_y; /* Differential MV components */
  1610. int val; /* temp values */
  1611. int first_block = 1;
  1612. int dst_idx, off;
  1613. int pred_flag = 0;
  1614. int block_cbp = 0, pat, block_tt = 0;
  1615. int idx_mbmode = 0;
  1616. mquant = v->pq; /* Lossy initialization */
  1617. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
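/* As used below: idx_mbmode <= 1 selects an intra MB, values up to 5 a 1-MV MB
 * and anything larger a 4-MV MB; the low bits signal whether MVDATA and CBPCY
 * are present. */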
  1618. if (idx_mbmode <= 1) { // intra MB
  1619. v->is_intra[s->mb_x] = 0x3f; // mark all six blocks as intra
  1620. s->mb_intra = 1;
  1621. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  1622. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  1623. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1624. GET_MQUANT();
  1625. s->current_picture.qscale_table[mb_pos] = mquant;
  1626. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1627. s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
  1628. s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
  1629. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1630. mb_has_coeffs = idx_mbmode & 1;
  1631. if (mb_has_coeffs)
  1632. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1633. dst_idx = 0;
  1634. for (i = 0; i < 6; i++) {
  1635. v->a_avail = v->c_avail = 0;
  1636. v->mb_type[0][s->block_index[i]] = 1;
  1637. s->dc_val[0][s->block_index[i]] = 0;
  1638. dst_idx += i >> 2;
  1639. val = ((cbp >> (5 - i)) & 1);
  1640. if (i == 2 || i == 3 || !s->first_slice_line)
  1641. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1642. if (i == 1 || i == 3 || s->mb_x)
  1643. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1644. vc1_decode_intra_block(v, v->block[v->cur_blk_idx][block_map[i]], i, val, mquant,
  1645. (i & 4) ? v->codingset2 : v->codingset);
  1646. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1647. continue;
  1648. v->vc1dsp.vc1_inv_trans_8x8(v->block[v->cur_blk_idx][block_map[i]]);
  1649. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1650. block_cbp |= 0xf << (i << 2);
  1651. }
  1652. } else {
  1653. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1654. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1655. for (i = 0; i < 6; i++)
  1656. v->mb_type[0][s->block_index[i]] = 0;
  1657. if (idx_mbmode <= 5) { // 1-MV
  1658. dmv_x = dmv_y = pred_flag = 0;
  1659. if (idx_mbmode & 1) {
  1660. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1661. }
  1662. ff_vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1663. ff_vc1_mc_1mv(v, 0);
  1664. mb_has_coeffs = !(idx_mbmode & 2);
  1665. } else { // 4-MV
  1666. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1667. for (i = 0; i < 4; i++) {
  1668. dmv_x = dmv_y = pred_flag = 0;
  1669. if (v->fourmvbp & (8 >> i))
  1670. get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
  1671. ff_vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
  1672. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  1673. }
  1674. ff_vc1_mc_4mv_chroma(v, 0);
  1675. mb_has_coeffs = idx_mbmode & 1;
  1676. }
  1677. if (mb_has_coeffs)
  1678. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1679. if (cbp) {
  1680. GET_MQUANT();
  1681. }
  1682. s->current_picture.qscale_table[mb_pos] = mquant;
  1683. if (!v->ttmbf && cbp) {
  1684. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1685. }
  1686. dst_idx = 0;
  1687. for (i = 0; i < 6; i++) {
  1688. s->dc_val[0][s->block_index[i]] = 0;
  1689. dst_idx += i >> 2;
  1690. val = ((cbp >> (5 - i)) & 1);
  1691. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  1692. if (val) {
  1693. pat = vc1_decode_p_block(v, v->block[v->cur_blk_idx][block_map[i]], i, mquant, ttmb,
  1694. first_block, s->dest[dst_idx] + off,
  1695. (i & 4) ? s->uvlinesize : s->linesize,
  1696. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
  1697. &block_tt);
  1698. if (pat < 0)
  1699. return pat;
  1700. block_cbp |= pat << (i << 2);
  1701. if (!v->ttmbf && ttmb < 8)
  1702. ttmb = -1;
  1703. first_block = 0;
  1704. }
  1705. }
  1706. }
  1707. if (v->overlap && v->pq >= 9)
  1708. ff_vc1_p_overlap_filter(v);
  1709. vc1_put_blocks_clamped(v, 1);
  1710. v->cbp[s->mb_x] = block_cbp;
  1711. v->ttblk[s->mb_x] = block_tt;
  1712. return 0;
  1713. }
  1714. /** Decode one B-frame MB (in Main profile)
  1715. */
  1716. static int vc1_decode_b_mb(VC1Context *v)
  1717. {
  1718. MpegEncContext *s = &v->s;
  1719. GetBitContext *gb = &s->gb;
  1720. int i, j;
  1721. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1722. int cbp = 0; /* coded block pattern */
  1723. int mqdiff, mquant; /* MB quantization */
  1724. int ttmb = v->ttfrm; /* MB Transform type */
  1725. int mb_has_coeffs = 0; /* last_flag */
  1726. int index, index1; /* LUT indexes */
  1727. int val, sign; /* temp values */
  1728. int first_block = 1;
  1729. int dst_idx, off;
  1730. int skipped, direct;
  1731. int dmv_x[2], dmv_y[2];
  1732. int bmvtype = BMV_TYPE_BACKWARD;
  1733. mquant = v->pq; /* lossy initialization */
  1734. s->mb_intra = 0;
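/* DIRECTMB and SKIPMB are either read here as raw per-MB bits or taken from the
 * previously decoded picture-level bitplanes (dmb_is_raw / skip_is_raw). */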
  1735. if (v->dmb_is_raw)
  1736. direct = get_bits1(gb);
  1737. else
  1738. direct = v->direct_mb_plane[mb_pos];
  1739. if (v->skip_is_raw)
  1740. skipped = get_bits1(gb);
  1741. else
  1742. skipped = v->s.mbskip_table[mb_pos];
  1743. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1744. for (i = 0; i < 6; i++) {
  1745. v->mb_type[0][s->block_index[i]] = 0;
  1746. s->dc_val[0][s->block_index[i]] = 0;
  1747. }
  1748. s->current_picture.qscale_table[mb_pos] = 0;
  1749. if (!direct) {
  1750. if (!skipped) {
  1751. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1752. dmv_x[1] = dmv_x[0];
  1753. dmv_y[1] = dmv_y[0];
  1754. }
  1755. if (skipped || !s->mb_intra) {
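/* BMVTYPE: decode012() returns 0, 1 or 2; the ternaries below map the first two
 * codes to backward/forward depending on whether BFRACTION is at least 1/2,
 * while 2 always selects interpolated prediction. */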
  1756. bmvtype = decode012(gb);
  1757. switch (bmvtype) {
  1758. case 0:
  1759. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  1760. break;
  1761. case 1:
  1762. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  1763. break;
  1764. case 2:
  1765. bmvtype = BMV_TYPE_INTERPOLATED;
  1766. dmv_x[0] = dmv_y[0] = 0;
  1767. }
  1768. }
  1769. }
  1770. for (i = 0; i < 6; i++)
  1771. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1772. if (skipped) {
  1773. if (direct)
  1774. bmvtype = BMV_TYPE_INTERPOLATED;
  1775. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1776. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1777. return 0;
  1778. }
  1779. if (direct) {
  1780. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1781. GET_MQUANT();
  1782. s->mb_intra = 0;
  1783. s->current_picture.qscale_table[mb_pos] = mquant;
  1784. if (!v->ttmbf)
  1785. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1786. dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
  1787. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1788. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1789. } else {
  1790. if (!mb_has_coeffs && !s->mb_intra) {
  1791. /* no coded blocks - effectively skipped */
  1792. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1793. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1794. return 0;
  1795. }
  1796. if (s->mb_intra && !mb_has_coeffs) {
  1797. GET_MQUANT();
  1798. s->current_picture.qscale_table[mb_pos] = mquant;
  1799. s->ac_pred = get_bits1(gb);
  1800. cbp = 0;
  1801. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1802. } else {
  1803. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  1804. GET_MVDATA(dmv_x[0], dmv_y[0]);
  1805. if (!mb_has_coeffs) {
  1806. /* interpolated skipped block */
  1807. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1808. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1809. return 0;
  1810. }
  1811. }
  1812. ff_vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
  1813. if (!s->mb_intra) {
  1814. vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
  1815. }
  1816. if (s->mb_intra)
  1817. s->ac_pred = get_bits1(gb);
  1818. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1819. GET_MQUANT();
  1820. s->current_picture.qscale_table[mb_pos] = mquant;
  1821. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  1822. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  1823. }
  1824. }
  1825. dst_idx = 0;
  1826. for (i = 0; i < 6; i++) {
  1827. s->dc_val[0][s->block_index[i]] = 0;
  1828. dst_idx += i >> 2;
  1829. val = ((cbp >> (5 - i)) & 1);
  1830. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1831. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  1832. if (s->mb_intra) {
  1833. /* check if prediction blocks A and C are available */
  1834. v->a_avail = v->c_avail = 0;
  1835. if (i == 2 || i == 3 || !s->first_slice_line)
  1836. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1837. if (i == 1 || i == 3 || s->mb_x)
  1838. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1839. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1840. (i & 4) ? v->codingset2 : v->codingset);
  1841. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1842. continue;
  1843. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1844. if (v->rangeredfrm)
  1845. for (j = 0; j < 64; j++)
  1846. s->block[i][j] *= 2;
  1847. s->idsp.put_signed_pixels_clamped(s->block[i],
  1848. s->dest[dst_idx] + off,
  1849. i & 4 ? s->uvlinesize
  1850. : s->linesize);
  1851. } else if (val) {
  1852. int pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  1853. first_block, s->dest[dst_idx] + off,
  1854. (i & 4) ? s->uvlinesize : s->linesize,
  1855. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
  1856. if (pat < 0)
  1857. return pat;
  1858. if (!v->ttmbf && ttmb < 8)
  1859. ttmb = -1;
  1860. first_block = 0;
  1861. }
  1862. }
  1863. return 0;
  1864. }
  1865. /** Decode one B-frame MB (in interlaced field B picture)
  1866. */
  1867. static int vc1_decode_b_mb_intfi(VC1Context *v)
  1868. {
  1869. MpegEncContext *s = &v->s;
  1870. GetBitContext *gb = &s->gb;
  1871. int i, j;
  1872. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  1873. int cbp = 0; /* coded block pattern */
  1874. int mqdiff, mquant; /* MB quantization */
  1875. int ttmb = v->ttfrm; /* MB Transform type */
  1876. int mb_has_coeffs = 0; /* last_flag */
  1877. int val; /* temp value */
  1878. int first_block = 1;
  1879. int dst_idx, off;
  1880. int fwd;
  1881. int dmv_x[2], dmv_y[2], pred_flag[2];
  1882. int bmvtype = BMV_TYPE_BACKWARD;
  1883. int block_cbp = 0, pat, block_tt = 0;
  1884. int idx_mbmode;
  1885. mquant = v->pq; /* Lossy initialization */
  1886. s->mb_intra = 0;
  1887. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  1888. if (idx_mbmode <= 1) { // intra MB
  1889. v->is_intra[s->mb_x] = 0x3f; // mark all six blocks as intra
  1890. s->mb_intra = 1;
  1891. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  1892. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  1893. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  1894. GET_MQUANT();
  1895. s->current_picture.qscale_table[mb_pos] = mquant;
  1896. /* Set DC scale - y and c use the same (not sure if necessary here) */
  1897. s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
  1898. s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
  1899. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  1900. mb_has_coeffs = idx_mbmode & 1;
  1901. if (mb_has_coeffs)
  1902. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  1903. dst_idx = 0;
  1904. for (i = 0; i < 6; i++) {
  1905. v->a_avail = v->c_avail = 0;
  1906. v->mb_type[0][s->block_index[i]] = 1;
  1907. s->dc_val[0][s->block_index[i]] = 0;
  1908. dst_idx += i >> 2;
  1909. val = ((cbp >> (5 - i)) & 1);
  1910. if (i == 2 || i == 3 || !s->first_slice_line)
  1911. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  1912. if (i == 1 || i == 3 || s->mb_x)
  1913. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  1914. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  1915. (i & 4) ? v->codingset2 : v->codingset);
  1916. if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  1917. continue;
  1918. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  1919. if (v->rangeredfrm)
  1920. for (j = 0; j < 64; j++)
  1921. s->block[i][j] <<= 1;
  1922. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  1923. s->idsp.put_signed_pixels_clamped(s->block[i],
  1924. s->dest[dst_idx] + off,
  1925. (i & 4) ? s->uvlinesize
  1926. : s->linesize);
  1927. }
  1928. } else {
  1929. s->mb_intra = v->is_intra[s->mb_x] = 0;
  1930. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  1931. for (i = 0; i < 6; i++)
  1932. v->mb_type[0][s->block_index[i]] = 0;
  1933. if (v->fmb_is_raw)
  1934. fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
  1935. else
  1936. fwd = v->forward_mb_plane[mb_pos];
  1937. if (idx_mbmode <= 5) { // 1-MV
  1938. int interpmvp = 0;
  1939. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  1940. pred_flag[0] = pred_flag[1] = 0;
  1941. if (fwd)
  1942. bmvtype = BMV_TYPE_FORWARD;
  1943. else {
  1944. bmvtype = decode012(gb);
  1945. switch (bmvtype) {
  1946. case 0:
  1947. bmvtype = BMV_TYPE_BACKWARD;
  1948. break;
  1949. case 1:
  1950. bmvtype = BMV_TYPE_DIRECT;
  1951. break;
  1952. case 2:
  1953. bmvtype = BMV_TYPE_INTERPOLATED;
  1954. interpmvp = get_bits1(gb);
  1955. }
  1956. }
  1957. v->bmvtype = bmvtype;
  1958. if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
  1959. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1960. }
  1961. if (interpmvp) {
  1962. get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
  1963. }
  1964. if (bmvtype == BMV_TYPE_DIRECT) {
  1965. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  1966. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  1967. if (!s->next_picture_ptr->field_picture) {
  1968. av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
  1969. return AVERROR_INVALIDDATA;
  1970. }
  1971. }
  1972. ff_vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
  1973. vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
  1974. mb_has_coeffs = !(idx_mbmode & 2);
  1975. } else { // 4-MV
  1976. if (fwd)
  1977. bmvtype = BMV_TYPE_FORWARD;
  1978. v->bmvtype = bmvtype;
  1979. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  1980. for (i = 0; i < 4; i++) {
  1981. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  1982. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  1983. if (v->fourmvbp & (8 >> i)) {
  1984. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
  1985. &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
  1986. &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  1987. }
  1988. ff_vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
  1989. ff_vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
  1990. }
  1991. ff_vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
  1992. mb_has_coeffs = idx_mbmode & 1;
  1993. }
  1994. if (mb_has_coeffs)
  1995. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  1996. if (cbp) {
  1997. GET_MQUANT();
  1998. }
  1999. s->current_picture.qscale_table[mb_pos] = mquant;
  2000. if (!v->ttmbf && cbp) {
  2001. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  2002. }
  2003. dst_idx = 0;
  2004. for (i = 0; i < 6; i++) {
  2005. s->dc_val[0][s->block_index[i]] = 0;
  2006. dst_idx += i >> 2;
  2007. val = ((cbp >> (5 - i)) & 1);
  2008. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  2009. if (val) {
  2010. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2011. first_block, s->dest[dst_idx] + off,
  2012. (i & 4) ? s->uvlinesize : s->linesize,
  2013. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  2014. if (pat < 0)
  2015. return pat;
  2016. block_cbp |= pat << (i << 2);
  2017. if (!v->ttmbf && ttmb < 8)
  2018. ttmb = -1;
  2019. first_block = 0;
  2020. }
  2021. }
  2022. }
  2023. v->cbp[s->mb_x] = block_cbp;
  2024. v->ttblk[s->mb_x] = block_tt;
  2025. return 0;
  2026. }
  2027. /** Decode one B-frame MB (in interlaced frame B picture)
  2028. */
  2029. static int vc1_decode_b_mb_intfr(VC1Context *v)
  2030. {
  2031. MpegEncContext *s = &v->s;
  2032. GetBitContext *gb = &s->gb;
  2033. int i, j;
  2034. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2035. int cbp = 0; /* coded block pattern */
  2036. int mqdiff, mquant; /* MB quantization */
  2037. int ttmb = v->ttfrm; /* MB Transform type */
  2038. int mvsw = 0; /* motion vector switch */
  2039. int mb_has_coeffs = 1; /* last_flag */
  2040. int dmv_x, dmv_y; /* Differential MV components */
  2041. int val; /* temp value */
  2042. int first_block = 1;
  2043. int dst_idx, off;
  2044. int skipped, direct, twomv = 0;
  2045. int block_cbp = 0, pat, block_tt = 0;
  2046. int idx_mbmode = 0, mvbp;
  2047. int stride_y, fieldtx;
  2048. int bmvtype = BMV_TYPE_BACKWARD;
  2049. int dir, dir2;
  2050. mquant = v->pq; /* Lossy initialization */
  2051. s->mb_intra = 0;
  2052. if (v->skip_is_raw)
  2053. skipped = get_bits1(gb);
  2054. else
  2055. skipped = v->s.mbskip_table[mb_pos];
  2056. if (!skipped) {
  2057. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
  2058. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
  2059. twomv = 1;
  2060. v->blk_mv_type[s->block_index[0]] = 1;
  2061. v->blk_mv_type[s->block_index[1]] = 1;
  2062. v->blk_mv_type[s->block_index[2]] = 1;
  2063. v->blk_mv_type[s->block_index[3]] = 1;
  2064. } else {
  2065. v->blk_mv_type[s->block_index[0]] = 0;
  2066. v->blk_mv_type[s->block_index[1]] = 0;
  2067. v->blk_mv_type[s->block_index[2]] = 0;
  2068. v->blk_mv_type[s->block_index[3]] = 0;
  2069. }
  2070. }
  2071. if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
  2072. for (i = 0; i < 4; i++) {
  2073. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
  2074. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
  2075. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  2076. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  2077. }
  2078. v->is_intra[s->mb_x] = 0x3f; // mark all six blocks as intra
  2079. s->mb_intra = 1;
  2080. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2081. fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
  2082. mb_has_coeffs = get_bits1(gb);
  2083. if (mb_has_coeffs)
  2084. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2085. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  2086. GET_MQUANT();
  2087. s->current_picture.qscale_table[mb_pos] = mquant;
  2088. /* Set DC scale - y and c use the same (not sure if necessary here) */
  2089. s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
  2090. s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
  2091. dst_idx = 0;
  2092. for (i = 0; i < 6; i++) {
  2093. v->a_avail = v->c_avail = 0;
  2094. v->mb_type[0][s->block_index[i]] = 1;
  2095. s->dc_val[0][s->block_index[i]] = 0;
  2096. dst_idx += i >> 2;
  2097. val = ((cbp >> (5 - i)) & 1);
  2098. if (i == 2 || i == 3 || !s->first_slice_line)
  2099. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  2100. if (i == 1 || i == 3 || s->mb_x)
  2101. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  2102. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  2103. (i & 4) ? v->codingset2 : v->codingset);
  2104. if (CONFIG_GRAY && i > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2105. continue;
  2106. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  2107. if (i < 4) {
  2108. stride_y = s->linesize << fieldtx;
  2109. off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
  2110. } else {
  2111. stride_y = s->uvlinesize;
  2112. off = 0;
  2113. }
  2114. s->idsp.put_signed_pixels_clamped(s->block[i],
  2115. s->dest[dst_idx] + off,
  2116. stride_y);
  2117. }
  2118. } else {
  2119. s->mb_intra = v->is_intra[s->mb_x] = 0;
  2120. if (v->dmb_is_raw)
  2121. direct = get_bits1(gb);
  2122. else
  2123. direct = v->direct_mb_plane[mb_pos];
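/* Direct mode: both MV sets are derived by scaling the co-located MVs of the
 * next (anchor) picture with BFRACTION, see the scale_mv() calls below. */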
  2124. if (direct) {
  2125. if (s->next_picture_ptr->field_picture)
  2126. av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
  2127. s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
  2128. s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
  2129. s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
  2130. s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
  2131. if (twomv) {
  2132. s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
  2133. s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
  2134. s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
  2135. s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
  2136. for (i = 1; i < 4; i += 2) {
  2137. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
  2138. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
  2139. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
  2140. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
  2141. }
  2142. } else {
  2143. for (i = 1; i < 4; i++) {
  2144. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
  2145. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
  2146. s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
  2147. s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
  2148. }
  2149. }
  2150. }
  2151. if (!direct) {
  2152. if (skipped || !s->mb_intra) {
  2153. bmvtype = decode012(gb);
  2154. switch (bmvtype) {
  2155. case 0:
  2156. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
  2157. break;
  2158. case 1:
  2159. bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
  2160. break;
  2161. case 2:
  2162. bmvtype = BMV_TYPE_INTERPOLATED;
  2163. }
  2164. }
  2165. if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
  2166. mvsw = get_bits1(gb);
  2167. }
  2168. if (!skipped) { // inter MB
  2169. mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
  2170. if (mb_has_coeffs)
  2171. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  2172. if (!direct) {
  2173. if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
  2174. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  2175. } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
  2176. v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
  2177. }
  2178. }
  2179. for (i = 0; i < 6; i++)
  2180. v->mb_type[0][s->block_index[i]] = 0;
  2181. fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
  2182. /* for each motion vector, read MVDATA and motion-compensate the corresponding block(s) */
  2183. dst_idx = 0;
  2184. if (direct) {
  2185. if (twomv) {
  2186. for (i = 0; i < 4; i++) {
  2187. ff_vc1_mc_4mv_luma(v, i, 0, 0);
  2188. ff_vc1_mc_4mv_luma(v, i, 1, 1);
  2189. }
  2190. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2191. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2192. } else {
  2193. ff_vc1_mc_1mv(v, 0);
  2194. ff_vc1_interp_mc(v);
  2195. }
  2196. } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
  2197. mvbp = v->fourmvbp;
  2198. for (i = 0; i < 4; i++) {
  2199. dir = i == 1 || i == 3;
  2200. dmv_x = dmv_y = 0;
  2201. val = ((mvbp >> (3 - i)) & 1);
  2202. if (val)
  2203. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2204. j = i > 1 ? 2 : 0;
  2205. ff_vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2206. ff_vc1_mc_4mv_luma(v, j, dir, dir);
  2207. ff_vc1_mc_4mv_luma(v, j+1, dir, dir);
  2208. }
  2209. ff_vc1_mc_4mv_chroma4(v, 0, 0, 0);
  2210. ff_vc1_mc_4mv_chroma4(v, 1, 1, 1);
  2211. } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2212. mvbp = v->twomvbp;
  2213. dmv_x = dmv_y = 0;
  2214. if (mvbp & 2)
  2215. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2216. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2217. ff_vc1_mc_1mv(v, 0);
  2218. dmv_x = dmv_y = 0;
  2219. if (mvbp & 1)
  2220. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2221. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2222. ff_vc1_interp_mc(v);
  2223. } else if (twomv) {
  2224. dir = bmvtype == BMV_TYPE_BACKWARD;
  2225. dir2 = dir;
  2226. if (mvsw)
  2227. dir2 = !dir;
  2228. mvbp = v->twomvbp;
  2229. dmv_x = dmv_y = 0;
  2230. if (mvbp & 2)
  2231. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2232. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
  2233. dmv_x = dmv_y = 0;
  2234. if (mvbp & 1)
  2235. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2236. ff_vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
  2237. if (mvsw) {
  2238. for (i = 0; i < 2; i++) {
  2239. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2240. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2241. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2242. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2243. }
  2244. } else {
  2245. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2246. ff_vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
  2247. }
  2248. ff_vc1_mc_4mv_luma(v, 0, dir, 0);
  2249. ff_vc1_mc_4mv_luma(v, 1, dir, 0);
  2250. ff_vc1_mc_4mv_luma(v, 2, dir2, 0);
  2251. ff_vc1_mc_4mv_luma(v, 3, dir2, 0);
  2252. ff_vc1_mc_4mv_chroma4(v, dir, dir2, 0);
  2253. } else {
  2254. dir = bmvtype == BMV_TYPE_BACKWARD;
  2255. mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
  2256. dmv_x = dmv_y = 0;
  2257. if (mvbp)
  2258. get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
  2259. ff_vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2260. v->blk_mv_type[s->block_index[0]] = 1;
  2261. v->blk_mv_type[s->block_index[1]] = 1;
  2262. v->blk_mv_type[s->block_index[2]] = 1;
  2263. v->blk_mv_type[s->block_index[3]] = 1;
  2264. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2265. for (i = 0; i < 2; i++) {
  2266. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2267. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2268. }
  2269. ff_vc1_mc_1mv(v, dir);
  2270. }
  2271. if (cbp)
  2272. GET_MQUANT(); // p. 227
  2273. s->current_picture.qscale_table[mb_pos] = mquant;
  2274. if (!v->ttmbf && cbp)
  2275. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  2276. for (i = 0; i < 6; i++) {
  2277. s->dc_val[0][s->block_index[i]] = 0;
  2278. dst_idx += i >> 2;
  2279. val = ((cbp >> (5 - i)) & 1);
  2280. if (!fieldtx)
  2281. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  2282. else
  2283. off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
  2284. if (val) {
  2285. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  2286. first_block, s->dest[dst_idx] + off,
  2287. (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
  2288. CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
  2289. if (pat < 0)
  2290. return pat;
  2291. block_cbp |= pat << (i << 2);
  2292. if (!v->ttmbf && ttmb < 8)
  2293. ttmb = -1;
  2294. first_block = 0;
  2295. }
  2296. }
  2297. } else { // skipped
  2298. dir = 0;
  2299. for (i = 0; i < 6; i++) {
  2300. v->mb_type[0][s->block_index[i]] = 0;
  2301. s->dc_val[0][s->block_index[i]] = 0;
  2302. }
  2303. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  2304. s->current_picture.qscale_table[mb_pos] = 0;
  2305. v->blk_mv_type[s->block_index[0]] = 0;
  2306. v->blk_mv_type[s->block_index[1]] = 0;
  2307. v->blk_mv_type[s->block_index[2]] = 0;
  2308. v->blk_mv_type[s->block_index[3]] = 0;
  2309. if (!direct) {
  2310. if (bmvtype == BMV_TYPE_INTERPOLATED) {
  2311. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
  2312. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
  2313. } else {
  2314. dir = bmvtype == BMV_TYPE_BACKWARD;
  2315. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
  2316. if (mvsw) {
  2317. int dir2 = !dir; /* mvsw is always set in this branch */
  2320. for (i = 0; i < 2; i++) {
  2321. s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
  2322. s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
  2323. s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
  2324. s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
  2325. }
  2326. } else {
  2327. v->blk_mv_type[s->block_index[0]] = 1;
  2328. v->blk_mv_type[s->block_index[1]] = 1;
  2329. v->blk_mv_type[s->block_index[2]] = 1;
  2330. v->blk_mv_type[s->block_index[3]] = 1;
  2331. ff_vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
  2332. for (i = 0; i < 2; i++) {
  2333. s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
  2334. s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
  2335. }
  2336. }
  2337. }
  2338. }
  2339. ff_vc1_mc_1mv(v, dir);
  2340. if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
  2341. ff_vc1_interp_mc(v);
  2342. }
  2343. v->fieldtx_plane[mb_pos] = 0;
  2344. }
  2345. }
  2346. v->cbp[s->mb_x] = block_cbp;
  2347. v->ttblk[s->mb_x] = block_tt;
  2348. return 0;
  2349. }
  2350. /** Decode blocks of I-frame
  2351. */
  2352. static void vc1_decode_i_blocks(VC1Context *v)
  2353. {
  2354. int k, j;
  2355. MpegEncContext *s = &v->s;
  2356. int cbp, val;
  2357. uint8_t *coded_val;
  2358. int mb_pos;
  2359. /* select coding mode used for VLC tables selection */
  2360. switch (v->y_ac_table_index) {
  2361. case 0:
  2362. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2363. break;
  2364. case 1:
  2365. v->codingset = CS_HIGH_MOT_INTRA;
  2366. break;
  2367. case 2:
  2368. v->codingset = CS_MID_RATE_INTRA;
  2369. break;
  2370. }
  2371. switch (v->c_ac_table_index) {
  2372. case 0:
  2373. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2374. break;
  2375. case 1:
  2376. v->codingset2 = CS_HIGH_MOT_INTER;
  2377. break;
  2378. case 2:
  2379. v->codingset2 = CS_MID_RATE_INTER;
  2380. break;
  2381. }
  2382. /* Set DC scale - y and c use the same */
  2383. s->y_dc_scale = s->y_dc_scale_table[v->pq];
  2384. s->c_dc_scale = s->c_dc_scale_table[v->pq];
  2385. //do frame decode
  2386. s->mb_x = s->mb_y = 0;
  2387. s->mb_intra = 1;
  2388. s->first_slice_line = 1;
  2389. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2390. s->mb_x = 0;
  2391. init_block_index(v);
  2392. for (; s->mb_x < v->end_mb_x; s->mb_x++) {
  2393. ff_update_block_index(s);
  2394. s->bdsp.clear_blocks(v->block[v->cur_blk_idx][0]);
  2395. mb_pos = s->mb_x + s->mb_y * s->mb_width;
  2396. s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
  2397. s->current_picture.qscale_table[mb_pos] = v->pq;
  2398. for (int i = 0; i < 4; i++) {
  2399. s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
  2400. s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
  2401. }
  2402. // do actual MB decoding and displaying
  2403. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2404. v->s.ac_pred = get_bits1(&v->s.gb);
  2405. for (k = 0; k < 6; k++) {
  2406. v->mb_type[0][s->block_index[k]] = 1;
  2407. val = ((cbp >> (5 - k)) & 1);
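/* For the four luma blocks the coded flag is transmitted differentially:
 * XOR it with the prediction from neighbouring blocks and store the result
 * for use as a future predictor. */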
  2408. if (k < 4) {
  2409. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2410. val = val ^ pred;
  2411. *coded_val = val;
  2412. }
  2413. cbp |= val << (5 - k);
  2414. vc1_decode_i_block(v, v->block[v->cur_blk_idx][block_map[k]], k, val, (k < 4) ? v->codingset : v->codingset2);
  2415. if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2416. continue;
  2417. v->vc1dsp.vc1_inv_trans_8x8(v->block[v->cur_blk_idx][block_map[k]]);
  2418. }
  2419. if (v->overlap && v->pq >= 9) {
  2420. ff_vc1_i_overlap_filter(v);
  2421. if (v->rangeredfrm)
  2422. for (k = 0; k < 6; k++)
  2423. for (j = 0; j < 64; j++)
  2424. v->block[v->cur_blk_idx][block_map[k]][j] *= 2;
  2425. vc1_put_blocks_clamped(v, 1);
  2426. } else {
  2427. if (v->rangeredfrm)
  2428. for (k = 0; k < 6; k++)
  2429. for (j = 0; j < 64; j++)
  2430. v->block[v->cur_blk_idx][block_map[k]][j] = (v->block[v->cur_blk_idx][block_map[k]][j] - 64) * 2;
  2431. vc1_put_blocks_clamped(v, 0);
  2432. }
  2433. if (v->s.loop_filter)
  2434. ff_vc1_i_loop_filter(v);
  2435. if (get_bits_left(&s->gb) < 0) {
  2436. ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
  2437. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2438. get_bits_count(&s->gb), s->gb.size_in_bits);
  2439. return;
  2440. }
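/* Rotate the ring of end_mb_x + 2 per-MB block buffers so the coefficients of
 * the left, top and top-left neighbours stay addressable for overlap filtering
 * and the delayed block output. */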
  2441. v->topleft_blk_idx = (v->topleft_blk_idx + 1) % (v->end_mb_x + 2);
  2442. v->top_blk_idx = (v->top_blk_idx + 1) % (v->end_mb_x + 2);
  2443. v->left_blk_idx = (v->left_blk_idx + 1) % (v->end_mb_x + 2);
  2444. v->cur_blk_idx = (v->cur_blk_idx + 1) % (v->end_mb_x + 2);
  2445. }
  2446. if (!v->s.loop_filter)
  2447. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2448. else if (s->mb_y)
  2449. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2450. s->first_slice_line = 0;
  2451. }
  2452. if (v->s.loop_filter)
  2453. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2454. /* This is intentionally mb_height and not end_mb_y - unlike in advanced
  2455. * profile, these only differ when decoding MSS2 rectangles. */
  2456. ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
  2457. }
  2458. /** Decode blocks of I-frame for advanced profile
  2459. */
  2460. static int vc1_decode_i_blocks_adv(VC1Context *v)
  2461. {
  2462. int k;
  2463. MpegEncContext *s = &v->s;
  2464. int cbp, val;
  2465. uint8_t *coded_val;
  2466. int mb_pos;
  2467. int mquant;
  2468. int mqdiff;
  2469. GetBitContext *gb = &s->gb;
  2470. if (get_bits_left(gb) <= 1)
  2471. return AVERROR_INVALIDDATA;
  2472. /* select coding mode used for VLC tables selection */
  2473. switch (v->y_ac_table_index) {
  2474. case 0:
  2475. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2476. break;
  2477. case 1:
  2478. v->codingset = CS_HIGH_MOT_INTRA;
  2479. break;
  2480. case 2:
  2481. v->codingset = CS_MID_RATE_INTRA;
  2482. break;
  2483. }
  2484. switch (v->c_ac_table_index) {
  2485. case 0:
  2486. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2487. break;
  2488. case 1:
  2489. v->codingset2 = CS_HIGH_MOT_INTER;
  2490. break;
  2491. case 2:
  2492. v->codingset2 = CS_MID_RATE_INTER;
  2493. break;
  2494. }
  2495. // do frame decode
  2496. s->mb_x = s->mb_y = 0;
  2497. s->mb_intra = 1;
  2498. s->first_slice_line = 1;
  2499. s->mb_y = s->start_mb_y;
  2500. if (s->start_mb_y) {
  2501. s->mb_x = 0;
  2502. init_block_index(v);
  2503. memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
  2504. (1 + s->b8_stride) * sizeof(*s->coded_block));
  2505. }
  2506. for (; s->mb_y < s->end_mb_y; s->mb_y++) {
  2507. s->mb_x = 0;
  2508. init_block_index(v);
  2509. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2510. mquant = v->pq;
  2511. ff_update_block_index(s);
  2512. s->bdsp.clear_blocks(v->block[v->cur_blk_idx][0]);
  2513. mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  2514. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  2515. for (int i = 0; i < 4; i++) {
  2516. s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = 0;
  2517. s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = 0;
  2518. }
  2519. // do actual MB decoding and displaying
  2520. if (v->fieldtx_is_raw)
  2521. v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
  2522. if (get_bits_left(&v->s.gb) <= 1) {
  2523. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2524. return 0;
  2525. }
  2526. cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
  2527. if (v->acpred_is_raw)
  2528. v->s.ac_pred = get_bits1(&v->s.gb);
  2529. else
  2530. v->s.ac_pred = v->acpred_plane[mb_pos];
  2531. if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
  2532. v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
  2533. GET_MQUANT();
  2534. s->current_picture.qscale_table[mb_pos] = mquant;
  2535. /* Set DC scale - y and c use the same */
  2536. s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
  2537. s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
  2538. for (k = 0; k < 6; k++) {
  2539. v->mb_type[0][s->block_index[k]] = 1;
  2540. val = ((cbp >> (5 - k)) & 1);
  2541. if (k < 4) {
  2542. int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
  2543. val = val ^ pred;
  2544. *coded_val = val;
  2545. }
  2546. cbp |= val << (5 - k);
  2547. v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
  2548. v->c_avail = !!s->mb_x || (k == 1 || k == 3);
  2549. vc1_decode_i_block_adv(v, v->block[v->cur_blk_idx][block_map[k]], k, val,
  2550. (k < 4) ? v->codingset : v->codingset2, mquant);
  2551. if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
  2552. continue;
  2553. v->vc1dsp.vc1_inv_trans_8x8(v->block[v->cur_blk_idx][block_map[k]]);
  2554. }
  2555. if (v->overlap && (v->pq >= 9 || v->condover != CONDOVER_NONE))
  2556. ff_vc1_i_overlap_filter(v);
  2557. vc1_put_blocks_clamped(v, 1);
  2558. if (v->s.loop_filter)
  2559. ff_vc1_i_loop_filter(v);
  2560. if (get_bits_left(&s->gb) < 0) {
  2561. // TODO: may need modification to handle slice coding
  2562. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2563. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
  2564. get_bits_count(&s->gb), s->gb.size_in_bits);
  2565. return 0;
  2566. }
  2567. inc_blk_idx(v->topleft_blk_idx);
  2568. inc_blk_idx(v->top_blk_idx);
  2569. inc_blk_idx(v->left_blk_idx);
  2570. inc_blk_idx(v->cur_blk_idx);
  2571. }
  2572. if (!v->s.loop_filter)
  2573. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2574. else if (s->mb_y)
  2575. ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
  2576. s->first_slice_line = 0;
  2577. }
  2578. if (v->s.loop_filter)
  2579. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2580. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2581. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2582. return 0;
  2583. }
  2584. static void vc1_decode_p_blocks(VC1Context *v)
  2585. {
  2586. MpegEncContext *s = &v->s;
  2587. int apply_loop_filter;
  2588. /* select coding mode used for VLC tables selection */
  2589. switch (v->c_ac_table_index) {
  2590. case 0:
  2591. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2592. break;
  2593. case 1:
  2594. v->codingset = CS_HIGH_MOT_INTRA;
  2595. break;
  2596. case 2:
  2597. v->codingset = CS_MID_RATE_INTRA;
  2598. break;
  2599. }
  2600. switch (v->c_ac_table_index) {
  2601. case 0:
  2602. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2603. break;
  2604. case 1:
  2605. v->codingset2 = CS_HIGH_MOT_INTER;
  2606. break;
  2607. case 2:
  2608. v->codingset2 = CS_MID_RATE_INTER;
  2609. break;
  2610. }
  2611. apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
  2612. s->first_slice_line = 1;
  2613. memset(v->cbp_base, 0, sizeof(v->cbp_base[0]) * 3 * s->mb_stride);
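/* The cbp / ttblk / is_intra / luma_mv arrays keep per-MB state of the previous
 * rows for the in-loop filters; after each row this history is shifted up by
 * one stride via the memmove() calls below. */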
  2614. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2615. s->mb_x = 0;
  2616. init_block_index(v);
  2617. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2618. ff_update_block_index(s);
  2619. if (v->fcm == ILACE_FIELD || (v->fcm == PROGRESSIVE && v->mv_type_is_raw) || v->skip_is_raw)
  2620. if (get_bits_left(&v->s.gb) <= 1) {
  2621. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2622. return;
  2623. }
  2624. if (v->fcm == ILACE_FIELD) {
  2625. vc1_decode_p_mb_intfi(v);
  2626. if (apply_loop_filter)
  2627. ff_vc1_p_loop_filter(v);
  2628. } else if (v->fcm == ILACE_FRAME) {
  2629. vc1_decode_p_mb_intfr(v);
  2630. if (apply_loop_filter)
  2631. ff_vc1_p_intfr_loop_filter(v);
  2632. } else {
  2633. vc1_decode_p_mb(v);
  2634. if (apply_loop_filter)
  2635. ff_vc1_p_loop_filter(v);
  2636. }
  2637. if (get_bits_left(&s->gb) < 0 || get_bits_count(&s->gb) < 0) {
  2638. // TODO: may need modification to handle slice coding
  2639. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2640. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2641. get_bits_count(&s->gb), s->gb.size_in_bits, s->mb_x, s->mb_y);
  2642. return;
  2643. }
  2644. inc_blk_idx(v->topleft_blk_idx);
  2645. inc_blk_idx(v->top_blk_idx);
  2646. inc_blk_idx(v->left_blk_idx);
  2647. inc_blk_idx(v->cur_blk_idx);
  2648. }
  2649. memmove(v->cbp_base,
  2650. v->cbp - s->mb_stride,
  2651. sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
  2652. memmove(v->ttblk_base,
  2653. v->ttblk - s->mb_stride,
  2654. sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
  2655. memmove(v->is_intra_base,
  2656. v->is_intra - s->mb_stride,
  2657. sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
  2658. memmove(v->luma_mv_base,
  2659. v->luma_mv - s->mb_stride,
  2660. sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
  2661. if (s->mb_y != s->start_mb_y)
  2662. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2663. s->first_slice_line = 0;
  2664. }
  2665. if (s->end_mb_y >= s->start_mb_y)
  2666. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2667. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2668. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2669. }
  2670. static void vc1_decode_b_blocks(VC1Context *v)
  2671. {
  2672. MpegEncContext *s = &v->s;
  2673. /* select coding mode used for VLC tables selection */
  2674. switch (v->c_ac_table_index) {
  2675. case 0:
  2676. v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
  2677. break;
  2678. case 1:
  2679. v->codingset = CS_HIGH_MOT_INTRA;
  2680. break;
  2681. case 2:
  2682. v->codingset = CS_MID_RATE_INTRA;
  2683. break;
  2684. }
  2685. switch (v->c_ac_table_index) {
  2686. case 0:
  2687. v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
  2688. break;
  2689. case 1:
  2690. v->codingset2 = CS_HIGH_MOT_INTER;
  2691. break;
  2692. case 2:
  2693. v->codingset2 = CS_MID_RATE_INTER;
  2694. break;
  2695. }
  2696. s->first_slice_line = 1;
  2697. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2698. s->mb_x = 0;
  2699. init_block_index(v);
  2700. for (; s->mb_x < s->mb_width; s->mb_x++) {
  2701. ff_update_block_index(s);
  2702. if (v->fcm == ILACE_FIELD || v->skip_is_raw || v->dmb_is_raw)
  2703. if (get_bits_left(&v->s.gb) <= 1) {
  2704. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2705. return;
  2706. }
  2707. if (v->fcm == ILACE_FIELD) {
  2708. vc1_decode_b_mb_intfi(v);
  2709. if (v->s.loop_filter)
  2710. ff_vc1_b_intfi_loop_filter(v);
  2711. } else if (v->fcm == ILACE_FRAME) {
  2712. vc1_decode_b_mb_intfr(v);
  2713. if (v->s.loop_filter)
  2714. ff_vc1_p_intfr_loop_filter(v);
  2715. } else {
  2716. vc1_decode_b_mb(v);
  2717. if (v->s.loop_filter)
  2718. ff_vc1_i_loop_filter(v);
  2719. }
  2720. if (get_bits_left(&s->gb) < 0 || get_bits_count(&s->gb) < 0) {
  2721. // TODO: may need modification to handle slice coding
  2722. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
  2723. av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
  2724. get_bits_count(&s->gb), s->gb.size_in_bits, s->mb_x, s->mb_y);
  2725. return;
  2726. }
  2727. }
  2728. memmove(v->cbp_base,
  2729. v->cbp - s->mb_stride,
  2730. sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
  2731. memmove(v->ttblk_base,
  2732. v->ttblk - s->mb_stride,
  2733. sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
  2734. memmove(v->is_intra_base,
  2735. v->is_intra - s->mb_stride,
  2736. sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
  2737. if (!v->s.loop_filter)
  2738. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2739. else if (s->mb_y)
  2740. ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
  2741. s->first_slice_line = 0;
  2742. }
  2743. if (v->s.loop_filter)
  2744. ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
  2745. ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
  2746. (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
  2747. }
  2748. static void vc1_decode_skip_blocks(VC1Context *v)
  2749. {
  2750. MpegEncContext *s = &v->s;
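/* An entirely skipped P frame just copies the previous picture row by row and
 * reports the whole slice as decoded. */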
  2751. if (!v->s.last_picture.f->data[0])
  2752. return;
  2753. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
  2754. s->first_slice_line = 1;
  2755. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  2756. s->mb_x = 0;
  2757. init_block_index(v);
  2758. ff_update_block_index(s);
  2759. memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
  2760. memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2761. memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  2762. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  2763. s->first_slice_line = 0;
  2764. }
  2765. s->pict_type = AV_PICTURE_TYPE_P;
  2766. }
  2767. void ff_vc1_decode_blocks(VC1Context *v)
  2768. {
  2769. v->s.esc3_level_length = 0;
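/* X8-coded intra frames are handed off to the IntraX8 decoder; everything else
 * goes through the per-picture-type block decoders below. */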
  2770. if (v->x8_type) {
  2771. ff_intrax8_decode_picture(&v->x8, &v->s.current_picture,
  2772. &v->s.gb, &v->s.mb_x, &v->s.mb_y,
  2773. 2 * v->pq + v->halfpq, v->pq * !v->pquantizer,
  2774. v->s.loop_filter, v->s.low_delay);
  2775. ff_er_add_slice(&v->s.er, 0, 0,
  2776. (v->s.mb_x >> 1) - 1, (v->s.mb_y >> 1) - 1,
  2777. ER_MB_END);
  2778. } else {
  2779. v->cur_blk_idx = 0;
  2780. v->left_blk_idx = -1;
  2781. v->topleft_blk_idx = 1;
  2782. v->top_blk_idx = 2;
  2783. switch (v->s.pict_type) {
  2784. case AV_PICTURE_TYPE_I:
  2785. if (v->profile == PROFILE_ADVANCED)
  2786. vc1_decode_i_blocks_adv(v);
  2787. else
  2788. vc1_decode_i_blocks(v);
  2789. break;
  2790. case AV_PICTURE_TYPE_P:
  2791. if (v->p_frame_skipped)
  2792. vc1_decode_skip_blocks(v);
  2793. else
  2794. vc1_decode_p_blocks(v);
  2795. break;
  2796. case AV_PICTURE_TYPE_B:
  2797. if (v->bi_type) {
  2798. if (v->profile == PROFILE_ADVANCED)
  2799. vc1_decode_i_blocks_adv(v);
  2800. else
  2801. vc1_decode_i_blocks(v);
  2802. } else
  2803. vc1_decode_b_blocks(v);
  2804. break;
  2805. }
  2806. }
  2807. }