You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

6359 lines
248KB

  1. /*
  2. * VC-1 and WMV3 decoder
  3. * Copyright (c) 2011 Mashiat Sarker Shakkhar
  4. * Copyright (c) 2006-2007 Konstantin Shishkov
  5. * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * VC-1 and WMV3 decoder
  26. */
  27. #include "internal.h"
  28. #include "avcodec.h"
  29. #include "error_resilience.h"
  30. #include "mpegutils.h"
  31. #include "mpegvideo.h"
  32. #include "h263.h"
  33. #include "h264chroma.h"
  34. #include "vc1.h"
  35. #include "vc1data.h"
  36. #include "vc1acdata.h"
  37. #include "msmpeg4data.h"
  38. #include "unary.h"
  39. #include "mathops.h"
  40. #include "vdpau_internal.h"
  41. #include "libavutil/avassert.h"
  42. #undef NDEBUG
  43. #include <assert.h>
/* bit widths used for the VLC lookup tables */
#define MB_INTRA_VLC_BITS 9
#define DC_VLC_BITS       9

// offset tables for interlaced picture MVDATA decoding
// table1[i] = 1 << (i - 1) for i > 0; table2[i] = (1 << i) - 1
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
  49. /***********************************************************************/
  50. /**
  51. * @name VC-1 Bitplane decoding
  52. * @see 8.7, p56
  53. * @{
  54. */
  55. static void init_block_index(VC1Context *v)
  56. {
  57. MpegEncContext *s = &v->s;
  58. ff_init_block_index(s);
  59. if (v->field_mode && !(v->second_field ^ v->tff)) {
  60. s->dest[0] += s->current_picture_ptr->f.linesize[0];
  61. s->dest[1] += s->current_picture_ptr->f.linesize[1];
  62. s->dest[2] += s->current_picture_ptr->f.linesize[2];
  63. }
  64. }
  65. /** @} */ //Bitplane group
/**
 * Write out the (signed, clamped) IDCT residual blocks of the macroblock
 * diagonally up-left of the current decode position — the MB that has by
 * now been fully overlap-filtered — and rotate the delayed block indices.
 */
static void vc1_put_signed_blocks_clamped(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int topleft_mb_pos, top_mb_pos;
    int stride_y, fieldtx = 0;
    int v_dist;

    /* The put pixels loop is always one MB row behind the decoding loop,
     * because we can only put pixels when overlap filtering is done, and
     * for filtering of the bottom edge of a MB, we need the next MB row
     * present as well.
     * Within the row, the put pixels loop is also one MB col behind the
     * decoding loop. The reason for this is again, because for filtering
     * of the right MB edge, we need the next MB present. */
    if (!s->first_slice_line) {
        if (s->mb_x) {
            /* flush the MB up-left of the current position */
            topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
            if (v->fcm == ILACE_FRAME)
                fieldtx = v->fieldtx_plane[topleft_mb_pos];
            /* with field transform the two luma fields are interleaved,
             * so luma blocks are written with doubled stride */
            stride_y = s->linesize << fieldtx;
            /* row offset of the bottom pair of luma blocks:
             * 8 for frame transform, 15 for field transform */
            v_dist = (16 - fieldtx) >> (fieldtx == 0);
            /* luma blocks 0..3 (top-left, top-right, bottom-left, bottom-right) */
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize - 16,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize - 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize - 16,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize - 8,
                                             stride_y);
            /* chroma blocks 4 (Cb) and 5 (Cr) */
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize - 8,
                                             s->uvlinesize);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize - 8,
                                             s->uvlinesize);
        }
        if (s->mb_x == s->mb_width - 1) {
            /* end of the row: also flush the MB directly above, which has
             * no right neighbour left to wait for */
            top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
            if (v->fcm == ILACE_FRAME)
                fieldtx = v->fieldtx_plane[top_mb_pos];
            stride_y = s->linesize << fieldtx;
            v_dist = fieldtx ? 15 : 8;
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize + 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize + 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize,
                                             s->uvlinesize);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize,
                                             s->uvlinesize);
        }
    }

/* advance a delayed-block ring-buffer index, wrapping at n_allocated_blks */
#define inc_blk_idx(idx) do { \
        idx++; \
        if (idx >= v->n_allocated_blks) \
            idx = 0; \
    } while (0)

    inc_blk_idx(v->topleft_blk_idx);
    inc_blk_idx(v->top_blk_idx);
    inc_blk_idx(v->left_blk_idx);
    inc_blk_idx(v->cur_blk_idx);
}
/**
 * In-loop deblocking for intra blocks.
 * Filters the edges around the current MB that already have reconstructed
 * pixels on both sides; edges touching not-yet-decoded neighbours are
 * deferred until the bottom row / right column special cases.
 * @param pq picture quantizer, controls filter strength
 */
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
{
    MpegEncContext *s = &v->s;
    int j;

    if (!s->first_slice_line) {
        /* horizontal edge between this MB and the MB above */
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
        if (s->mb_x)
            /* left edge of the MB above */
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
        /* internal vertical edge of the MB above */
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {
            /* same for the two chroma planes (Cb, Cr) */
            v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
            if (s->mb_x)
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
        }
    }
    /* internal horizontal luma edge of the current MB */
    v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);

    if (s->mb_y == s->end_mb_y - 1) {
        /* last MB row: no row below will trigger these, do them now */
        if (s->mb_x) {
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
        }
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
    }
}
/**
 * Delayed in-loop deblocking for intra blocks when overlap smoothing is
 * active: the loop filter must run on smoothed pixels, so it trails the
 * overlap filter by one row/col, i.e. two rows/cols behind decoding.
 * @param pq picture quantizer, controls filter strength
 */
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
{
    MpegEncContext *s = &v->s;
    int j;

    /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
     * means it runs two rows/cols behind the decoding loop. */
    if (!s->first_slice_line) {
        if (s->mb_x) {
            /* filter around the MB up-left of the current one */
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
                if (s->mb_x >= 2)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
                    if (s->mb_x >= 2) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
                    }
                }
            }
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
        }
        if (s->mb_x == s->mb_width - 1) {
            /* end of row: the MB directly above has no right neighbour left */
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
                if (s->mb_x)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
                    if (s->mb_x >= 2) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
                    }
                }
            }
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
        }
        if (s->mb_y == s->end_mb_y) {
            /* past the last row: drain the remaining vertical edges */
            if (s->mb_x) {
                if (s->mb_x >= 2)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
                if (s->mb_x >= 2) {
                    for (j = 0; j < 2; j++) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
                    }
                }
            }
            if (s->mb_x == s->mb_width - 1) {
                if (s->mb_x)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
                if (s->mb_x) {
                    for (j = 0; j < 2; j++) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
                    }
                }
            }
        }
    }
}
/**
 * Conditional overlap smoothing of intra block edges (advanced profile).
 * Smoothing is applied when CONDOVER is ALL, when pq >= 9, or when the
 * per-MB over_flags bitplane selects both MBs sharing the edge.
 */
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int mb_pos;

    if (v->condover == CONDOVER_NONE)
        return;

    mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Within a MB, the horizontal overlap always runs before the vertical.
     * To accomplish that, we run the H on left and internal borders of the
     * currently decoded MB. Then, we wait for the next overlap iteration
     * to do H overlap on the right edge of this MB, before moving over and
     * running the V overlap. Therefore, the V overlap makes us trail by one
     * MB col and the H overlap filter makes us trail by one MB row. This
     * is reflected in the time at which we run the put_pixels loop. */
    if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
        /* H overlap: edge between left neighbour and current MB */
        if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                        v->over_flags_plane[mb_pos - 1])) {
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
                                      v->block[v->cur_blk_idx][0]);
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
                                      v->block[v->cur_blk_idx][2]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
                                          v->block[v->cur_blk_idx][4]);
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
                                          v->block[v->cur_blk_idx][5]);
            }
        }
        /* H overlap: internal luma edges of the current MB */
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
                                  v->block[v->cur_blk_idx][1]);
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
                                  v->block[v->cur_blk_idx][3]);

        if (s->mb_x == s->mb_width - 1) {
            /* last MB of the row: no right neighbour, run V overlap now */
            if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                         v->over_flags_plane[mb_pos - s->mb_stride])) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
                                          v->block[v->cur_blk_idx][0]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
                                          v->block[v->cur_blk_idx][1]);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
                                              v->block[v->cur_blk_idx][4]);
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
                                              v->block[v->cur_blk_idx][5]);
                }
            }
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
                                      v->block[v->cur_blk_idx][2]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
                                      v->block[v->cur_blk_idx][3]);
        }
    }
    /* V overlap on the left neighbour (one MB col behind).
     * NOTE(review): unlike the parallel H-overlap check above, this
     * condition does not test v->pq >= 9 — verify against SMPTE 421M
     * whether the asymmetry is intentional. */
    if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
        if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                     v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
                                      v->block[v->left_blk_idx][0]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
                                      v->block[v->left_blk_idx][1]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
                                          v->block[v->left_blk_idx][4]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
                                          v->block[v->left_blk_idx][5]);
            }
        }
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
                                  v->block[v->left_blk_idx][2]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
                                  v->block[v->left_blk_idx][3]);
    }
}
/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 * @param dir 0 = forward prediction (last picture, or current picture for
 *            the opposite field when decoding the second field),
 *            1 = backward prediction (next picture)
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode; // per-field height in field mode
    int i;
    uint8_t (*luty)[256], (*lutuv)[256];             // intensity-compensation LUTs
    int use_ic;

    /* bail out if the reference picture we would read from is missing */
    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
        return;

    /* luma MV in quarter-pel units */
    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        for (i = 0; i < 4; i++) {
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
        }
    }

    /* derive the chroma MV: half the luma MV, with 3/4-pel positions
     * rounded up before the shift */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    /* field pictures referencing the opposite field need a vertical bias */
    if (v->field_mode &&
        v->cur_field_type != v->ref_field_type[dir]) {
        my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }

    // fastuvmc shall be ignored for interlaced frame picture
    if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
        /* round chroma MV components toward zero to half-pel positions */
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }

    /* select reference planes, IC LUTs and IC flag for this direction */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            /* second field predicting from the first field of the same frame */
            srcY = s->current_picture.f.data[0];
            srcU = s->current_picture.f.data[1];
            srcV = s->current_picture.f.data[2];
            luty = v->curr_luty;
            lutuv = v->curr_lutuv;
            use_ic = *v->curr_use_ic;
        } else {
            srcY = s->last_picture.f.data[0];
            srcU = s->last_picture.f.data[1];
            srcV = s->last_picture.f.data[2];
            luty = v->last_luty;
            lutuv = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f.data[0];
        srcU = s->next_picture.f.data[1];
        srcV = s->next_picture.f.data[2];
        luty = v->next_luty;
        lutuv = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcY || !srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    /* integer source position; low 2 bits of the MV are the fractional part */
    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
        src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* bottom reference field starts one line down */
    if (v->field_mode && v->ref_field_type[dir]) {
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* Take the slow path through the edge-emulation buffer whenever the
     * source pixels must be post-processed (range reduction or intensity
     * compensation) or the read would cross the padded picture edge. */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 16 - s->mspel * 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        /* back up to include the extra border rows/cols mspel MC reads */
        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128; // halve the range around 128
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                /* LUT is selected per source field (top/bottom) */
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1);
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* undo the border backup so srcY points at the block start again */
        srcY += s->mspel * (1 + s->linesize);
    }

    if (s->mspel) {
        /* quarter-pel MC, done as four 8x8 blocks */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0], srcY, s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize, srcY, s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;

    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
  491. static inline int median4(int a, int b, int c, int d)
  492. {
  493. if (a < b) {
  494. if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
  495. else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
  496. } else {
  497. if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
  498. else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
  499. }
  500. }
/** Do motion compensation for 4-MV macroblock - luminance block
 * @param n   luma block index within the MB (0..3)
 * @param dir 0 = forward prediction, 1 = backward prediction
 * @param avg nonzero to average into the destination instead of replacing
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
{
    MpegEncContext *s = &v->s;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;
    /* field MV flag for this block in interlaced frame pictures */
    int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*luty)[256];
    int use_ic;

    /* bail out if the reference picture we would read from is missing */
    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f.data[0])
        return;

    mx = s->mv[dir][n][0];
    my = s->mv[dir][n][1];

    /* select reference plane, IC LUT and IC flag for this direction */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY = s->current_picture.f.data[0];
            luty = v->curr_luty;
            use_ic = *v->curr_use_ic;
        } else {
            srcY = s->last_picture.f.data[0];
            luty = v->last_luty;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f.data[0];
        luty = v->next_luty;
        use_ic = v->next_use_ic;
    }

    if (!srcY) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    /* opposite-field reference needs a vertical bias */
    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[dir])
            my = my - 2 + 4 * v->cur_field_type;
    }

    /* P field picture, last luma block of the MB: pick the dominant field
     * among the four block MVs and condense them into a single MV
     * (median of 4, median of 3, or mean of 2), stored at block 0 —
     * presumably for later chroma/B-frame MV derivation; see
     * vc1_mc_4mv_chroma. */
    if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
        int same_count = 0, opp_count = 0, k;
        int chosen_mv[2][4][2], f;
        int tx, ty;

        for (k = 0; k < 4; k++) {
            f = v->mv_f[0][s->block_index[k] + v->blocks_off];
            chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
            chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
            opp_count += f;
            same_count += 1 - f;
        }
        f = opp_count > same_count; /* dominant field */
        switch (f ? opp_count : same_count) {
        case 4:
            tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
                         chosen_mv[f][2][0], chosen_mv[f][3][0]);
            ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
                         chosen_mv[f][2][1], chosen_mv[f][3][1]);
            break;
        case 3:
            tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
            ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
            break;
        case 2:
            tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
            ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
            break;
        default:
            av_assert0(0); /* counts sum to 4, so the dominant count is >= 2 */
        }
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
        for (k = 0; k < 4; k++)
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
    }

    if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
        int qx, qy;
        int width = s->avctx->coded_width;
        int height = s->avctx->coded_height >> 1;

        if (s->pict_type == AV_PICTURE_TYPE_P) {
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
        }
        /* pull the MV back so the source position stays inside the
         * allowed range (my is in field units here: >> 3) */
        qx = (s->mb_x * 16) + (mx >> 2);
        qy = (s->mb_y * 8) + (my >> 3);

        if (qx < -17)
            mx -= 4 * (qx + 17);
        else if (qx > width)
            mx -= 4 * (qx - width);
        if (qy < -18)
            my -= 8 * (qy + 18);
        else if (qy > height + 1)
            my -= 8 * (qy - height - 1);
    }

    /* destination offset of this 8x8 block inside the MB; with field MVs
     * blocks 2/3 go to the second field (one line down, doubled stride) */
    if ((v->fcm == ILACE_FRAME) && fieldmv)
        off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
    else
        off = s->linesize * 4 * (n & 2) + (n & 1) * 8;

    src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
    if (!fieldmv)
        src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
    else
        src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
    } else {
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
        if (v->fcm == ILACE_FRAME) {
            /* keep the source line in the same field parity */
            if (src_y & 1)
                src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
            else
                src_y = av_clip(src_y, -18, s->avctx->coded_height);
        } else {
            src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
        }
    }

    srcY += src_y * s->linesize + src_x;
    if (v->field_mode && v->ref_field_type[dir])
        srcY += s->current_picture_ptr->f.linesize[0];

    if (fieldmv && !(src_y & 1))
        v_edge_pos--;
    if (fieldmv && (src_y & 1) && src_y < 4)
        src_y--;

    /* slow path through the edge-emulation buffer when the pixels must be
     * post-processed or the read would cross the padded picture edge */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 13 || v_edge_pos < 23
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
        || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
        srcY -= s->mspel * (1 + (s->linesize << fieldmv));
        /* check emulate edge stride and offset */
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
                                 src_x - s->mspel, src_y - (s->mspel << fieldmv),
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128; // halve the range around 128
                src += s->linesize << fieldmv;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for (j = 0; j < 9 + s->mspel * 2; j++) {
                /* LUT selected per source field parity */
                int f = v->field_mode ? v->ref_field_type[dir] : (((j << fieldmv) + src_y - (s->mspel << fieldmv)) & 1);
                for (i = 0; i < 9 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize << fieldmv;
            }
        }
        srcY += s->mspel * (1 + (s->linesize << fieldmv));
    }

    if (s->mspel) {
        /* quarter-pel MC */
        dxy = ((my & 3) << 2) | (mx & 3);
        if (avg)
            v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
        else
            v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}
/**
 * Condense up to four luma MVs into one (tx, ty) for chroma prediction.
 * A luma block i takes part only when a[i] == flag (e.g. a = intra flags
 * with flag == 0, or a = field flags with flag == the dominant field).
 * 4 valid -> component-wise median4, 3 -> mid_pred, 2 -> mean, else none.
 * @return the number of MVs the result was derived from (0 if none)
 */
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
{
    int idx, i;
    /* popcount of a 4-bit index */
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};

    /* bit i of idx set <=> block i excluded (a[i] != flag) */
    idx = ((a[3] != flag) << 3)
        | ((a[2] != flag) << 2)
        | ((a[1] != flag) << 1)
        |  (a[0] != flag);
    if (!idx) {
        /* all four valid */
        *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
        *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
        return 4;
    } else if (count[idx] == 1) {
        /* exactly one excluded: median of the remaining three */
        switch (idx) {
        case 0x1:
            *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
            *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
            return 3;
        case 0x2:
            *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
            *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
            return 3;
        case 0x4:
            *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
            *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
            return 3;
        case 0x8:
            *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
            *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
            return 3;
        }
    } else if (count[idx] == 2) {
        /* two excluded: average the other two.
         * NOTE(review): these loops test !a[i] (a[i] == 0) rather than
         * a[i] == flag; for flag != 0 that selects the EXCLUDED pair —
         * verify against SMPTE 421M / newer FFmpeg whether intended. */
        int t1 = 0, t2 = 0;
        for (i = 0; i < 3; i++)
            if (!a[i]) {
                t1 = i;
                break;
            }
        for (i = t1 + 1; i < 4; i++)
            if (!a[i]) {
                t2 = i;
                break;
            }
        *tx = (mvx[t1] + mvx[t2]) / 2;
        *ty = (mvy[t1] + mvy[t2]) / 2;
        return 2;
    } else {
        return 0;
    }
    /* unreachable: count[idx] == 1 implies idx in {1,2,4,8}, all handled
     * above; kept so the compiler sees a return on every path */
    return -1;
}
  728. /** Do motion compensation for 4-MV macroblock - both chroma blocks
  729. */
  730. static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
  731. {
  732. MpegEncContext *s = &v->s;
  733. H264ChromaContext *h264chroma = &v->h264chroma;
  734. uint8_t *srcU, *srcV;
  735. int uvmx, uvmy, uvsrc_x, uvsrc_y;
  736. int k, tx = 0, ty = 0;
  737. int mvx[4], mvy[4], intra[4], mv_f[4];
  738. int valid_count;
  739. int chroma_ref_type = v->cur_field_type;
  740. int v_edge_pos = s->v_edge_pos >> v->field_mode;
  741. uint8_t (*lutuv)[256];
  742. int use_ic;
  743. if (!v->field_mode && !v->s.last_picture.f.data[0])
  744. return;
  745. if (s->flags & CODEC_FLAG_GRAY)
  746. return;
  747. for (k = 0; k < 4; k++) {
  748. mvx[k] = s->mv[dir][k][0];
  749. mvy[k] = s->mv[dir][k][1];
  750. intra[k] = v->mb_type[0][s->block_index[k]];
  751. if (v->field_mode)
  752. mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
  753. }
  754. /* calculate chroma MV vector from four luma MVs */
  755. if (!v->field_mode || (v->field_mode && !v->numref)) {
  756. valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
  757. chroma_ref_type = v->reffield;
  758. if (!valid_count) {
  759. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
  760. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
  761. v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
  762. return; //no need to do MC for intra blocks
  763. }
  764. } else {
  765. int dominant = 0;
  766. if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
  767. dominant = 1;
  768. valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
  769. if (dominant)
  770. chroma_ref_type = !v->cur_field_type;
  771. }
  772. if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
  773. return;
  774. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
  775. s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
  776. uvmx = (tx + ((tx & 3) == 3)) >> 1;
  777. uvmy = (ty + ((ty & 3) == 3)) >> 1;
  778. v->luma_mv[s->mb_x][0] = uvmx;
  779. v->luma_mv[s->mb_x][1] = uvmy;
  780. if (v->fastuvmc) {
  781. uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
  782. uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
  783. }
  784. // Field conversion bias
  785. if (v->cur_field_type != chroma_ref_type)
  786. uvmy += 2 - 4 * chroma_ref_type;
  787. uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
  788. uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
  789. if (v->profile != PROFILE_ADVANCED) {
  790. uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
  791. uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
  792. } else {
  793. uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
  794. uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
  795. }
  796. if (!dir) {
  797. if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
  798. srcU = s->current_picture.f.data[1];
  799. srcV = s->current_picture.f.data[2];
  800. lutuv = v->curr_lutuv;
  801. use_ic = *v->curr_use_ic;
  802. } else {
  803. srcU = s->last_picture.f.data[1];
  804. srcV = s->last_picture.f.data[2];
  805. lutuv = v->last_lutuv;
  806. use_ic = v->last_use_ic;
  807. }
  808. } else {
  809. srcU = s->next_picture.f.data[1];
  810. srcV = s->next_picture.f.data[2];
  811. lutuv = v->next_lutuv;
  812. use_ic = v->next_use_ic;
  813. }
  814. if (!srcU) {
  815. av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
  816. return;
  817. }
  818. srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
  819. srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
  820. if (v->field_mode) {
  821. if (chroma_ref_type) {
  822. srcU += s->current_picture_ptr->f.linesize[1];
  823. srcV += s->current_picture_ptr->f.linesize[2];
  824. }
  825. }
  826. if (v->rangeredfrm || use_ic
  827. || s->h_edge_pos < 18 || v_edge_pos < 18
  828. || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
  829. || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
  830. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
  831. s->uvlinesize, s->uvlinesize,
  832. 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
  833. s->h_edge_pos >> 1, v_edge_pos >> 1);
  834. s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
  835. s->uvlinesize, s->uvlinesize,
  836. 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
  837. s->h_edge_pos >> 1, v_edge_pos >> 1);
  838. srcU = s->edge_emu_buffer;
  839. srcV = s->edge_emu_buffer + 16;
  840. /* if we deal with range reduction we need to scale source blocks */
  841. if (v->rangeredfrm) {
  842. int i, j;
  843. uint8_t *src, *src2;
  844. src = srcU;
  845. src2 = srcV;
  846. for (j = 0; j < 9; j++) {
  847. for (i = 0; i < 9; i++) {
  848. src[i] = ((src[i] - 128) >> 1) + 128;
  849. src2[i] = ((src2[i] - 128) >> 1) + 128;
  850. }
  851. src += s->uvlinesize;
  852. src2 += s->uvlinesize;
  853. }
  854. }
  855. /* if we deal with intensity compensation we need to scale source blocks */
  856. if (use_ic) {
  857. int i, j;
  858. uint8_t *src, *src2;
  859. src = srcU;
  860. src2 = srcV;
  861. for (j = 0; j < 9; j++) {
  862. int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
  863. for (i = 0; i < 9; i++) {
  864. src[i] = lutuv[f][src[i]];
  865. src2[i] = lutuv[f][src2[i]];
  866. }
  867. src += s->uvlinesize;
  868. src2 += s->uvlinesize;
  869. }
  870. }
  871. }
  872. /* Chroma MC always uses qpel bilinear */
  873. uvmx = (uvmx & 3) << 1;
  874. uvmy = (uvmy & 3) << 1;
  875. if (!v->rnd) {
  876. h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
  877. h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
  878. } else {
  879. v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
  880. v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
  881. }
  882. }
/** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
 *
 * Each of the four 4x4 chroma sub-blocks is motion-compensated separately.
 * @param v    decoder context
 * @param dir  prediction direction for the two top sub-blocks (0: last/forward,
 *             1: next/backward reference)
 * @param dir2 prediction direction for the two bottom sub-blocks
 * @param avg  if nonzero, average into the destination instead of overwriting
 *             (presumably used for B-frame interpolation — confirm at call sites)
 */
static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvsrc_x, uvsrc_y;
    int uvmx_field[4], uvmy_field[4];
    int i, off, tx, ty;
    int fieldmv = v->blk_mv_type[s->block_index[0]];
    /* rounding table applied to the low 4 MV bits in field-MV mode;
       NOTE(review): assumed to implement the spec's field chroma rounding —
       confirm against SMPTE 421M */
    static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
    int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
    int v_edge_pos = s->v_edge_pos >> 1;
    int use_ic;
    uint8_t (*lutuv)[256];

    /* no chroma planes to fill when decoding grayscale-only */
    if (s->flags & CODEC_FLAG_GRAY)
        return;

    /* derive the chroma MV for each sub-block from the corresponding luma MV */
    for (i = 0; i < 4; i++) {
        int d = i < 2 ? dir: dir2;

        tx = s->mv[d][i][0];
        uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
        ty = s->mv[d][i][1];
        if (fieldmv)
            uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
        else
            uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
    }

    for (i = 0; i < 4; i++) {
        /* destination offset of this 4x4 sub-block inside the 8x8 chroma MB */
        off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
        uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
        uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
        // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
        /* pick the reference picture and its intensity-compensation LUT */
        if (i < 2 ? dir : dir2) {
            srcU   = s->next_picture.f.data[1];
            srcV   = s->next_picture.f.data[2];
            lutuv  = v->next_lutuv;
            use_ic = v->next_use_ic;
        } else {
            srcU   = s->last_picture.f.data[1];
            srcV   = s->last_picture.f.data[2];
            lutuv  = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
        if (!srcU)
            return;
        srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
        srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
        /* keep only the fractional (qpel) part for the bilinear filter below */
        uvmx_field[i] = (uvmx_field[i] & 3) << 1;
        uvmy_field[i] = (uvmy_field[i] & 3) << 1;

        if (fieldmv && !(uvsrc_y & 1))
            v_edge_pos = (s->v_edge_pos >> 1) - 1;
        if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
            uvsrc_y--;
        /* fall back to edge emulation when IC is active or the 5x5 fetch
           (interleaved over 5<<fieldmv rows in field-MV mode) leaves the frame */
        if (use_ic
            || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
            || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
            || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            srcU = s->edge_emu_buffer;
            srcV = s->edge_emu_buffer + 16;

            /* if we deal with intensity compensation we need to scale source blocks */
            if (use_ic) {
                int i, j;
                uint8_t *src, *src2;

                src  = srcU;
                src2 = srcV;
                for (j = 0; j < 5; j++) {
                    /* field parity selects which of the two LUTs applies */
                    int f = (uvsrc_y + (j << fieldmv)) & 1;
                    for (i = 0; i < 5; i++) {
                        src[i]  = lutuv[f][src[i]];
                        src2[i] = lutuv[f][src2[i]];
                    }
                    src  += s->uvlinesize << fieldmv;
                    src2 += s->uvlinesize << fieldmv;
                }
            }
        }
        /* 4x4 qpel bilinear MC; rounding control picks the no-rnd variants */
        if (avg) {
            if (!v->rnd) {
                h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            } else {
                v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            }
        } else {
            if (!v->rnd) {
                h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            } else {
                v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            }
        }
    }
}
  989. /***********************************************************************/
  990. /**
  991. * @name VC-1 Block-level functions
 * @see 7.1.4, p91 and 8.1.1.7, p104
  993. * @{
  994. */
  995. /**
  996. * @def GET_MQUANT
  997. * @brief Get macroblock-level quantizer scale
  998. */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                /* one bit picks PQUANT or ALTPQUANT */        \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                /* 3-bit delta; 7 escapes to a raw 5-bit MQUANT */ \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else                                           \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        /* edge mask: bit0 = left, bit1 = top, bit2 = right, bit3 = bottom */ \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        /* MBs on the selected picture edges always use ALTPQUANT */ \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        /* clamp out-of-range values (legal MQUANT is 1..31) */ \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
            mquant = 1;                                        \
        }                                                      \
    }
  1033. /**
  1034. * @def GET_MVDATA(_dmv_x, _dmv_y)
  1035. * @brief Get MV differentials
 * @see MVDATA decoding from 8.3.5.2, p120
  1037. * @param _dmv_x Horizontal differential for decoded MV
  1038. * @param _dmv_y Vertical differential for decoded MV
  1039. */
#define GET_MVDATA(_dmv_x, _dmv_y)                                      \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                      \
    /* codes above 36 additionally signal that the MB has coefficients */ \
    if (index > 36) {                                                   \
        mb_has_coeffs = 1;                                              \
        index -= 37;                                                    \
    } else                                                              \
        mb_has_coeffs = 0;                                              \
    s->mb_intra = 0;                                                    \
    if (!index) {                                                       \
        /* index 0: zero MV differential */                             \
        _dmv_x = _dmv_y = 0;                                            \
    } else if (index == 35) {                                           \
        /* escape: differentials coded as raw fixed-length fields */    \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);          \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);          \
    } else if (index == 36) {                                           \
        /* index 36: block is coded as intra, no MV */                  \
        _dmv_x = 0;                                                     \
        _dmv_y = 0;                                                     \
        s->mb_intra = 1;                                                \
    } else {                                                            \
        /* split index into (x, y) table rows; decode magnitude + sign */ \
        index1 = index % 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else                                   val = 0;                 \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else                                   val = 0;                 \
        sign = 0 - (val & 1);                                           \
        _dmv_x = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
                                                                        \
        index1 = index / 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else                                   val = 0;                 \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else                                   val = 0;                 \
        sign = 0 - (val & 1);                                           \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
    }
/**
 * Decode an interlaced-picture MV differential (MVDATA).
 *
 * @param v          decoder context (bit reader taken from v->s.gb)
 * @param dmv_x      receives the horizontal MV differential
 * @param dmv_y      receives the vertical MV differential
 * @param pred_flag  if non-NULL and two reference fields are in use,
 *                   receives the predictor-selection flag
 */
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
                                                   int *dmv_y, int *pred_flag)
{
    int index, index1;
    int extend_x = 0, extend_y = 0;
    GetBitContext *gb = &v->s.gb;
    int bits, esc;
    int val, sign;
    const int* offs_tab;

    /* table size and escape code differ between 1- and 2-reference pictures */
    if (v->numref) {
        bits = VC1_2REF_MVDATA_VLC_BITS;
        esc  = 125;
    } else {
        bits = VC1_1REF_MVDATA_VLC_BITS;
        esc  = 71;
    }
    /* DMVRANGE extends the coded range per component */
    switch (v->dmvrange) {
    case 1:
        extend_x = 1;
        break;
    case 2:
        extend_y = 1;
        break;
    case 3:
        extend_x = extend_y = 1;
        break;
    }
    index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
    if (index == esc) {
        /* escape: both differentials follow as raw fixed-length fields */
        *dmv_x = get_bits(gb, v->k_x);
        *dmv_y = get_bits(gb, v->k_y);
        if (v->numref) {
            /* with two reference fields the LSB of dmv_y carries the
               predictor-selection flag */
            if (pred_flag) {
                *pred_flag = *dmv_y & 1;
                *dmv_y     = (*dmv_y + *pred_flag) >> 1;
            } else {
                *dmv_y     = (*dmv_y + (*dmv_y & 1)) >> 1;
            }
        }
    }
    else {
        av_assert0(index < esc);
        if (extend_x)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        /* index+1 splits into horizontal (mod 9) and vertical (div 9) parts */
        index1 = (index + 1) % 9;
        if (index1 != 0) {
            val    = get_bits(gb, index1 + extend_x);
            sign   = 0 - (val & 1);
            *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
        } else
            *dmv_x = 0;
        if (extend_y)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        index1 = (index + 1) / 9;
        if (index1 > v->numref) {
            val    = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
            sign   = 0 - (val & 1);
            *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
        } else
            *dmv_y = 0;
        if (v->numref && pred_flag)
            *pred_flag = index1 & 1;
    }
}
  1145. static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
  1146. {
  1147. int scaledvalue, refdist;
  1148. int scalesame1, scalesame2;
  1149. int scalezone1_x, zone1offset_x;
  1150. int table_index = dir ^ v->second_field;
  1151. if (v->s.pict_type != AV_PICTURE_TYPE_B)
  1152. refdist = v->refdist;
  1153. else
  1154. refdist = dir ? v->brfd : v->frfd;
  1155. if (refdist > 3)
  1156. refdist = 3;
  1157. scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
  1158. scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
  1159. scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
  1160. zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
  1161. if (FFABS(n) > 255)
  1162. scaledvalue = n;
  1163. else {
  1164. if (FFABS(n) < scalezone1_x)
  1165. scaledvalue = (n * scalesame1) >> 8;
  1166. else {
  1167. if (n < 0)
  1168. scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
  1169. else
  1170. scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
  1171. }
  1172. }
  1173. return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
  1174. }
  1175. static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
  1176. {
  1177. int scaledvalue, refdist;
  1178. int scalesame1, scalesame2;
  1179. int scalezone1_y, zone1offset_y;
  1180. int table_index = dir ^ v->second_field;
  1181. if (v->s.pict_type != AV_PICTURE_TYPE_B)
  1182. refdist = v->refdist;
  1183. else
  1184. refdist = dir ? v->brfd : v->frfd;
  1185. if (refdist > 3)
  1186. refdist = 3;
  1187. scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
  1188. scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
  1189. scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
  1190. zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
  1191. if (FFABS(n) > 63)
  1192. scaledvalue = n;
  1193. else {
  1194. if (FFABS(n) < scalezone1_y)
  1195. scaledvalue = (n * scalesame1) >> 8;
  1196. else {
  1197. if (n < 0)
  1198. scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
  1199. else
  1200. scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
  1201. }
  1202. }
  1203. if (v->cur_field_type && !v->ref_field_type[dir])
  1204. return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
  1205. else
  1206. return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
  1207. }
  1208. static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
  1209. {
  1210. int scalezone1_x, zone1offset_x;
  1211. int scaleopp1, scaleopp2, brfd;
  1212. int scaledvalue;
  1213. brfd = FFMIN(v->brfd, 3);
  1214. scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
  1215. zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
  1216. scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
  1217. scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
  1218. if (FFABS(n) > 255)
  1219. scaledvalue = n;
  1220. else {
  1221. if (FFABS(n) < scalezone1_x)
  1222. scaledvalue = (n * scaleopp1) >> 8;
  1223. else {
  1224. if (n < 0)
  1225. scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
  1226. else
  1227. scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
  1228. }
  1229. }
  1230. return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
  1231. }
  1232. static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
  1233. {
  1234. int scalezone1_y, zone1offset_y;
  1235. int scaleopp1, scaleopp2, brfd;
  1236. int scaledvalue;
  1237. brfd = FFMIN(v->brfd, 3);
  1238. scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
  1239. zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
  1240. scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
  1241. scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
  1242. if (FFABS(n) > 63)
  1243. scaledvalue = n;
  1244. else {
  1245. if (FFABS(n) < scalezone1_y)
  1246. scaledvalue = (n * scaleopp1) >> 8;
  1247. else {
  1248. if (n < 0)
  1249. scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
  1250. else
  1251. scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
  1252. }
  1253. }
  1254. if (v->cur_field_type && !v->ref_field_type[dir]) {
  1255. return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
  1256. } else {
  1257. return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
  1258. }
  1259. }
  1260. static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
  1261. int dim, int dir)
  1262. {
  1263. int brfd, scalesame;
  1264. int hpel = 1 - v->s.quarter_sample;
  1265. n >>= hpel;
  1266. if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
  1267. if (dim)
  1268. n = scaleforsame_y(v, i, n, dir) << hpel;
  1269. else
  1270. n = scaleforsame_x(v, n, dir) << hpel;
  1271. return n;
  1272. }
  1273. brfd = FFMIN(v->brfd, 3);
  1274. scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
  1275. n = (n * scalesame >> 8) << hpel;
  1276. return n;
  1277. }
  1278. static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
  1279. int dim, int dir)
  1280. {
  1281. int refdist, scaleopp;
  1282. int hpel = 1 - v->s.quarter_sample;
  1283. n >>= hpel;
  1284. if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
  1285. if (dim)
  1286. n = scaleforopp_y(v, n, dir) << hpel;
  1287. else
  1288. n = scaleforopp_x(v, n) << hpel;
  1289. return n;
  1290. }
  1291. if (v->s.pict_type != AV_PICTURE_TYPE_B)
  1292. refdist = FFMIN(v->refdist, 3);
  1293. else
  1294. refdist = dir ? v->brfd : v->frfd;
  1295. scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
  1296. n = (n * scaleopp >> 8) << hpel;
  1297. return n;
  1298. }
  1299. /** Predict and set motion vector
  1300. */
  1301. static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
  1302. int mv1, int r_x, int r_y, uint8_t* is_intra,
  1303. int pred_flag, int dir)
  1304. {
  1305. MpegEncContext *s = &v->s;
  1306. int xy, wrap, off = 0;
  1307. int16_t *A, *B, *C;
  1308. int px, py;
  1309. int sum;
  1310. int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
  1311. int opposite, a_f, b_f, c_f;
  1312. int16_t field_predA[2];
  1313. int16_t field_predB[2];
  1314. int16_t field_predC[2];
  1315. int a_valid, b_valid, c_valid;
  1316. int hybridmv_thresh, y_bias = 0;
  1317. if (v->mv_mode == MV_PMODE_MIXED_MV ||
  1318. ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
  1319. mixedmv_pic = 1;
  1320. else
  1321. mixedmv_pic = 0;
  1322. /* scale MV difference to be quad-pel */
  1323. dmv_x <<= 1 - s->quarter_sample;
  1324. dmv_y <<= 1 - s->quarter_sample;
  1325. wrap = s->b8_stride;
  1326. xy = s->block_index[n];
  1327. if (s->mb_intra) {
  1328. s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
  1329. s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
  1330. s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
  1331. s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
  1332. if (mv1) { /* duplicate motion data for 1-MV block */
  1333. s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
  1334. s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
  1335. s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
  1336. s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
  1337. s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
  1338. s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
  1339. v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
  1340. s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
  1341. s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
  1342. s->current_picture.motion_val[1][xy + wrap][0] = 0;
  1343. s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
  1344. s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
  1345. s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
  1346. }
  1347. return;
  1348. }
  1349. C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
  1350. A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
  1351. if (mv1) {
  1352. if (v->field_mode && mixedmv_pic)
  1353. off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
  1354. else
  1355. off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
  1356. } else {
  1357. //in 4-MV mode different blocks have different B predictor position
  1358. switch (n) {
  1359. case 0:
  1360. off = (s->mb_x > 0) ? -1 : 1;
  1361. break;
  1362. case 1:
  1363. off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
  1364. break;
  1365. case 2:
  1366. off = 1;
  1367. break;
  1368. case 3:
  1369. off = -1;
  1370. }
  1371. }
  1372. B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
  1373. a_valid = !s->first_slice_line || (n == 2 || n == 3);
  1374. b_valid = a_valid && (s->mb_width > 1);
  1375. c_valid = s->mb_x || (n == 1 || n == 3);
  1376. if (v->field_mode) {
  1377. a_valid = a_valid && !is_intra[xy - wrap];
  1378. b_valid = b_valid && !is_intra[xy - wrap + off];
  1379. c_valid = c_valid && !is_intra[xy - 1];
  1380. }
  1381. if (a_valid) {
  1382. a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
  1383. num_oppfield += a_f;
  1384. num_samefield += 1 - a_f;
  1385. field_predA[0] = A[0];
  1386. field_predA[1] = A[1];
  1387. } else {
  1388. field_predA[0] = field_predA[1] = 0;
  1389. a_f = 0;
  1390. }
  1391. if (b_valid) {
  1392. b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
  1393. num_oppfield += b_f;
  1394. num_samefield += 1 - b_f;
  1395. field_predB[0] = B[0];
  1396. field_predB[1] = B[1];
  1397. } else {
  1398. field_predB[0] = field_predB[1] = 0;
  1399. b_f = 0;
  1400. }
  1401. if (c_valid) {
  1402. c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
  1403. num_oppfield += c_f;
  1404. num_samefield += 1 - c_f;
  1405. field_predC[0] = C[0];
  1406. field_predC[1] = C[1];
  1407. } else {
  1408. field_predC[0] = field_predC[1] = 0;
  1409. c_f = 0;
  1410. }
  1411. if (v->field_mode) {
  1412. if (!v->numref)
  1413. // REFFIELD determines if the last field or the second-last field is
  1414. // to be used as reference
  1415. opposite = 1 - v->reffield;
  1416. else {
  1417. if (num_samefield <= num_oppfield)
  1418. opposite = 1 - pred_flag;
  1419. else
  1420. opposite = pred_flag;
  1421. }
  1422. } else
  1423. opposite = 0;
  1424. if (opposite) {
  1425. if (a_valid && !a_f) {
  1426. field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
  1427. field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
  1428. }
  1429. if (b_valid && !b_f) {
  1430. field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
  1431. field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
  1432. }
  1433. if (c_valid && !c_f) {
  1434. field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
  1435. field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
  1436. }
  1437. v->mv_f[dir][xy + v->blocks_off] = 1;
  1438. v->ref_field_type[dir] = !v->cur_field_type;
  1439. } else {
  1440. if (a_valid && a_f) {
  1441. field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
  1442. field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
  1443. }
  1444. if (b_valid && b_f) {
  1445. field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
  1446. field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
  1447. }
  1448. if (c_valid && c_f) {
  1449. field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
  1450. field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
  1451. }
  1452. v->mv_f[dir][xy + v->blocks_off] = 0;
  1453. v->ref_field_type[dir] = v->cur_field_type;
  1454. }
  1455. if (a_valid) {
  1456. px = field_predA[0];
  1457. py = field_predA[1];
  1458. } else if (c_valid) {
  1459. px = field_predC[0];
  1460. py = field_predC[1];
  1461. } else if (b_valid) {
  1462. px = field_predB[0];
  1463. py = field_predB[1];
  1464. } else {
  1465. px = 0;
  1466. py = 0;
  1467. }
  1468. if (num_samefield + num_oppfield > 1) {
  1469. px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
  1470. py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
  1471. }
  1472. /* Pullback MV as specified in 8.3.5.3.4 */
  1473. if (!v->field_mode) {
  1474. int qx, qy, X, Y;
  1475. qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
  1476. qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
  1477. X = (s->mb_width << 6) - 4;
  1478. Y = (s->mb_height << 6) - 4;
  1479. if (mv1) {
  1480. if (qx + px < -60) px = -60 - qx;
  1481. if (qy + py < -60) py = -60 - qy;
  1482. } else {
  1483. if (qx + px < -28) px = -28 - qx;
  1484. if (qy + py < -28) py = -28 - qy;
  1485. }
  1486. if (qx + px > X) px = X - qx;
  1487. if (qy + py > Y) py = Y - qy;
  1488. }
  1489. if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
  1490. /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
  1491. hybridmv_thresh = 32;
  1492. if (a_valid && c_valid) {
  1493. if (is_intra[xy - wrap])
  1494. sum = FFABS(px) + FFABS(py);
  1495. else
  1496. sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
  1497. if (sum > hybridmv_thresh) {
  1498. if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
  1499. px = field_predA[0];
  1500. py = field_predA[1];
  1501. } else {
  1502. px = field_predC[0];
  1503. py = field_predC[1];
  1504. }
  1505. } else {
  1506. if (is_intra[xy - 1])
  1507. sum = FFABS(px) + FFABS(py);
  1508. else
  1509. sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
  1510. if (sum > hybridmv_thresh) {
  1511. if (get_bits1(&s->gb)) {
  1512. px = field_predA[0];
  1513. py = field_predA[1];
  1514. } else {
  1515. px = field_predC[0];
  1516. py = field_predC[1];
  1517. }
  1518. }
  1519. }
  1520. }
  1521. }
  1522. if (v->field_mode && v->numref)
  1523. r_y >>= 1;
  1524. if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
  1525. y_bias = 1;
  1526. /* store MV using signed modulus of MV range defined in 4.11 */
  1527. s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
  1528. s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
  1529. if (mv1) { /* duplicate motion data for 1-MV block */
  1530. s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
  1531. s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
  1532. s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
  1533. s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
  1534. s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
  1535. s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
  1536. v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
  1537. v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
  1538. }
  1539. }
  1540. /** Predict and set motion vector for interlaced frame picture MBs
  1541. */
  1542. static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
  1543. int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
  1544. {
  1545. MpegEncContext *s = &v->s;
  1546. int xy, wrap, off = 0;
  1547. int A[2], B[2], C[2];
  1548. int px = 0, py = 0;
  1549. int a_valid = 0, b_valid = 0, c_valid = 0;
  1550. int field_a, field_b, field_c; // 0: same, 1: opposit
  1551. int total_valid, num_samefield, num_oppfield;
  1552. int pos_c, pos_b, n_adj;
  1553. wrap = s->b8_stride;
  1554. xy = s->block_index[n];
  1555. if (s->mb_intra) {
  1556. s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
  1557. s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
  1558. s->current_picture.motion_val[1][xy][0] = 0;
  1559. s->current_picture.motion_val[1][xy][1] = 0;
  1560. if (mvn == 1) { /* duplicate motion data for 1-MV block */
  1561. s->current_picture.motion_val[0][xy + 1][0] = 0;
  1562. s->current_picture.motion_val[0][xy + 1][1] = 0;
  1563. s->current_picture.motion_val[0][xy + wrap][0] = 0;
  1564. s->current_picture.motion_val[0][xy + wrap][1] = 0;
  1565. s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
  1566. s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
  1567. v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
  1568. s->current_picture.motion_val[1][xy + 1][0] = 0;
  1569. s->current_picture.motion_val[1][xy + 1][1] = 0;
  1570. s->current_picture.motion_val[1][xy + wrap][0] = 0;
  1571. s->current_picture.motion_val[1][xy + wrap][1] = 0;
  1572. s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
  1573. s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
  1574. }
  1575. return;
  1576. }
  1577. off = ((n == 0) || (n == 1)) ? 1 : -1;
  1578. /* predict A */
  1579. if (s->mb_x || (n == 1) || (n == 3)) {
  1580. if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
  1581. || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
  1582. A[0] = s->current_picture.motion_val[dir][xy - 1][0];
  1583. A[1] = s->current_picture.motion_val[dir][xy - 1][1];
  1584. a_valid = 1;
  1585. } else { // current block has frame mv and cand. has field MV (so average)
  1586. A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
  1587. + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
  1588. A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
  1589. + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
  1590. a_valid = 1;
  1591. }
  1592. if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
  1593. a_valid = 0;
  1594. A[0] = A[1] = 0;
  1595. }
  1596. } else
  1597. A[0] = A[1] = 0;
  1598. /* Predict B and C */
  1599. B[0] = B[1] = C[0] = C[1] = 0;
  1600. if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
  1601. if (!s->first_slice_line) {
  1602. if (!v->is_intra[s->mb_x - s->mb_stride]) {
  1603. b_valid = 1;
  1604. n_adj = n | 2;
  1605. pos_b = s->block_index[n_adj] - 2 * wrap;
  1606. if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
  1607. n_adj = (n & 2) | (n & 1);
  1608. }
  1609. B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
  1610. B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
  1611. if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
  1612. B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
  1613. B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
  1614. }
  1615. }
  1616. if (s->mb_width > 1) {
  1617. if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
  1618. c_valid = 1;
  1619. n_adj = 2;
  1620. pos_c = s->block_index[2] - 2 * wrap + 2;
  1621. if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
  1622. n_adj = n & 2;
  1623. }
  1624. C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
  1625. C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
  1626. if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
  1627. C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
  1628. C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
  1629. }
  1630. if (s->mb_x == s->mb_width - 1) {
  1631. if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
  1632. c_valid = 1;
  1633. n_adj = 3;
  1634. pos_c = s->block_index[3] - 2 * wrap - 2;
  1635. if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
  1636. n_adj = n | 1;
  1637. }
  1638. C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
  1639. C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
  1640. if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
  1641. C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
  1642. C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
  1643. }
  1644. } else
  1645. c_valid = 0;
  1646. }
  1647. }
  1648. }
  1649. }
  1650. } else {
  1651. pos_b = s->block_index[1];
  1652. b_valid = 1;
  1653. B[0] = s->current_picture.motion_val[dir][pos_b][0];
  1654. B[1] = s->current_picture.motion_val[dir][pos_b][1];
  1655. pos_c = s->block_index[0];
  1656. c_valid = 1;
  1657. C[0] = s->current_picture.motion_val[dir][pos_c][0];
  1658. C[1] = s->current_picture.motion_val[dir][pos_c][1];
  1659. }
  1660. total_valid = a_valid + b_valid + c_valid;
  1661. // check if predictor A is out of bounds
  1662. if (!s->mb_x && !(n == 1 || n == 3)) {
  1663. A[0] = A[1] = 0;
  1664. }
  1665. // check if predictor B is out of bounds
  1666. if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
  1667. B[0] = B[1] = C[0] = C[1] = 0;
  1668. }
  1669. if (!v->blk_mv_type[xy]) {
  1670. if (s->mb_width == 1) {
  1671. px = B[0];
  1672. py = B[1];
  1673. } else {
  1674. if (total_valid >= 2) {
  1675. px = mid_pred(A[0], B[0], C[0]);
  1676. py = mid_pred(A[1], B[1], C[1]);
  1677. } else if (total_valid) {
  1678. if (a_valid) { px = A[0]; py = A[1]; }
  1679. else if (b_valid) { px = B[0]; py = B[1]; }
  1680. else { px = C[0]; py = C[1]; }
  1681. }
  1682. }
  1683. } else {
  1684. if (a_valid)
  1685. field_a = (A[1] & 4) ? 1 : 0;
  1686. else
  1687. field_a = 0;
  1688. if (b_valid)
  1689. field_b = (B[1] & 4) ? 1 : 0;
  1690. else
  1691. field_b = 0;
  1692. if (c_valid)
  1693. field_c = (C[1] & 4) ? 1 : 0;
  1694. else
  1695. field_c = 0;
  1696. num_oppfield = field_a + field_b + field_c;
  1697. num_samefield = total_valid - num_oppfield;
  1698. if (total_valid == 3) {
  1699. if ((num_samefield == 3) || (num_oppfield == 3)) {
  1700. px = mid_pred(A[0], B[0], C[0]);
  1701. py = mid_pred(A[1], B[1], C[1]);
  1702. } else if (num_samefield >= num_oppfield) {
  1703. /* take one MV from same field set depending on priority
  1704. the check for B may not be necessary */
  1705. px = !field_a ? A[0] : B[0];
  1706. py = !field_a ? A[1] : B[1];
  1707. } else {
  1708. px = field_a ? A[0] : B[0];
  1709. py = field_a ? A[1] : B[1];
  1710. }
  1711. } else if (total_valid == 2) {
  1712. if (num_samefield >= num_oppfield) {
  1713. if (!field_a && a_valid) {
  1714. px = A[0];
  1715. py = A[1];
  1716. } else if (!field_b && b_valid) {
  1717. px = B[0];
  1718. py = B[1];
  1719. } else /*if (c_valid)*/ {
  1720. av_assert1(c_valid);
  1721. px = C[0];
  1722. py = C[1];
  1723. } /*else px = py = 0;*/
  1724. } else {
  1725. if (field_a && a_valid) {
  1726. px = A[0];
  1727. py = A[1];
  1728. } else /*if (field_b && b_valid)*/ {
  1729. av_assert1(field_b && b_valid);
  1730. px = B[0];
  1731. py = B[1];
  1732. } /*else if (c_valid) {
  1733. px = C[0];
  1734. py = C[1];
  1735. }*/
  1736. }
  1737. } else if (total_valid == 1) {
  1738. px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
  1739. py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
  1740. }
  1741. }
  1742. /* store MV using signed modulus of MV range defined in 4.11 */
  1743. s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
  1744. s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
  1745. if (mvn == 1) { /* duplicate motion data for 1-MV block */
  1746. s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
  1747. s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
  1748. s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
  1749. s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
  1750. s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
  1751. s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
  1752. } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
  1753. s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
  1754. s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
  1755. s->mv[dir][n + 1][0] = s->mv[dir][n][0];
  1756. s->mv[dir][n + 1][1] = s->mv[dir][n][1];
  1757. }
  1758. }
/** Motion compensation for direct or interpolated blocks in B-frames.
 *  The forward prediction has already been written to s->dest by
 *  vc1_mc_1mv(); this routine fetches the backward (next-picture)
 *  prediction and averages it on top via the avg_* DSP functions.
 */
static void vc1_interp_mc(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int off, off_uv;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    int use_ic = v->next_use_ic; // intensity compensation against the next ref

    if (!v->field_mode && !v->s.next_picture.f.data[0])
        return;

    /* Backward luma MV; chroma MV is derived by halving, with 3/4-pel
     * positions rounded up by one quarter-pel first. */
    mx   = s->mv[1][0][0];
    my   = s->mv[1][0][1];
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if (v->field_mode) {
        /* cross-field reference: shift vertical luma MV by one field line */
        if (v->cur_field_type != v->ref_field_type[1])
            my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }
    if (v->fastuvmc) {
        /* FASTUVMC: round chroma MVs away from quarter-pel positions */
        uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
    }
    srcY = s->next_picture.f.data[0];
    srcU = s->next_picture.f.data[1];
    srcV = s->next_picture.f.data[2];

    src_x   = s->mb_x * 16 + (mx   >> 2);
    src_y   = s->mb_y * 16 + (my   >> 2);
    uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
    uvsrc_y = s->mb_y *  8 + (uvmy >> 2);

    /* clamp the integer source position so reads stay within the
     * (edge-padded) reference frame */
    if (v->profile != PROFILE_ADVANCED) {
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    } else {
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y   * s->linesize   + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode && v->ref_field_type[1]) {
        /* bottom reference field: start one frame line further down */
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* Copy through the edge emulation buffer when the block may reach
     * outside the padded reference, or when range reduction / intensity
     * compensation must rewrite the source pixels before filtering. */
    if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
        || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        /* back up by one row/column of filter context when mspel is on */
        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i]  = ((src[i]  - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }

        /* intensity compensation: remap pixels through the per-field LUTs */
        if (use_ic) {
            uint8_t (*luty )[256] = v->next_luty;
            uint8_t (*lutuv)[256] = v->next_lutuv;
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                /* select the LUT of the field this source line belongs to */
                int f = v->field_mode ? v->ref_field_type[1] : ((j + src_y - s->mspel) & 1);
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize;
            }
            src  = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[1] : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i]  = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src  += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* undo the context backup so srcY points at the block again */
        srcY += s->mspel * (1 + s->linesize);
    }

    off    = 0;
    off_uv = 0;

    if (s->mspel) {
        /* quarter-pel luma: four 8x8 averaging mspel filter calls */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc
        dxy = (my & 2) | ((mx & 2) >> 1);

        if (!v->rnd)
            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
        else
            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
  1914. static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
  1915. {
  1916. int n = bfrac;
  1917. #if B_FRACTION_DEN==256
  1918. if (inv)
  1919. n -= 256;
  1920. if (!qs)
  1921. return 2 * ((value * n + 255) >> 9);
  1922. return (value * n + 128) >> 8;
  1923. #else
  1924. if (inv)
  1925. n -= B_FRACTION_DEN;
  1926. if (!qs)
  1927. return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
  1928. return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
  1929. #endif
  1930. }
  1931. /** Reconstruct motion vector for B-frame and do motion compensation
  1932. */
  1933. static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
  1934. int direct, int mode)
  1935. {
  1936. if (direct) {
  1937. vc1_mc_1mv(v, 0);
  1938. vc1_interp_mc(v);
  1939. return;
  1940. }
  1941. if (mode == BMV_TYPE_INTERPOLATED) {
  1942. vc1_mc_1mv(v, 0);
  1943. vc1_interp_mc(v);
  1944. return;
  1945. }
  1946. vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
  1947. }
/** Predict the forward and/or backward MV pair of a B-frame macroblock
 *  (progressive pictures only, see the assert) and store the result both
 *  in s->mv and in the current picture's motion_val array.
 */
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                                 int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;          // spatial MV predictors (top / top-right|left / left)
    int px, py;
    int sum;
    int r_x, r_y;                // MV range for the signed-modulus wrap below
    const uint8_t *is_intra = v->mb_type[0];

    av_assert0(!v->field_mode);

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    /* intra MBs carry no motion: zero both directions and bail out */
    if (s->mb_intra) {
        s->current_picture.motion_val[0][xy][0] =
        s->current_picture.motion_val[0][xy][1] =
        s->current_picture.motion_val[1][xy][0] =
        s->current_picture.motion_val[1][xy][1] = 0;
        return;
    }
    if (direct && s->next_picture_ptr->field_picture)
        av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");

    /* Seed fwd (mv[0]) and bwd (mv[1]) predictors by scaling the co-located
     * MV of the next reference picture with BFRACTION (scale_mv()). */
    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

    /* Pullback predicted motion vectors as specified in 8.4.5.4 */
    s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
    s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
    s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    if (direct) {
        /* direct mode: the scaled co-located MVs are final */
        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* forward direction: median of left (C), top (A) and
         * top-right (B; top-left at the last MB column) neighbours */
        C   = s->current_picture.motion_val[0][xy - 2];
        A   = s->current_picture.motion_val[0][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B   = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X  = (s->mb_width  << 5) - 4;
                Y  = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X  = (s->mb_width  << 6) - 4;
                Y  = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5
         * NOTE: intentionally disabled here via the leading "0 &&". */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* backward direction: same procedure on motion_val[1] */
        C   = s->current_picture.motion_val[1][xy - 2];
        A   = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B   = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X  = (s->mb_width  << 5) - 4;
                Y  = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X  = (s->mb_width  << 6) - 4;
                Y  = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5
         * NOTE: intentionally disabled here via the leading "0 &&". */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
/** Predict B-frame MVs for interlaced field pictures.
 *  Direct mode scales the co-located MV of the next reference and picks the
 *  reference field by majority vote of the co-located field flags; the other
 *  modes delegate to vc1_pred_mv() per direction.
 */
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;

        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            /* scale the co-located MV by BFRACTION for both directions */
            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            /* majority vote over the 4 co-located blocks' opposite-field flags */
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            /* co-located MB is intra: zero MVs, same-field reference */
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        /* propagate the MV pair and field flag to all 4 blocks of the MB */
        for (k = 0; k < 4; k++) {
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        /* interpolated: predict both directions for the whole MB */
        vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        /* also refresh the opposite direction once per MB */
        if (n == 3 || mv1) {
            vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}
  2207. /** Get predicted DC value for I-frames only
  2208. * prediction dir: left=0, top=1
  2209. * @param s MpegEncContext
  2210. * @param overlap flag indicating that overlap filtering is used
  2211. * @param pq integer part of picture quantizer
  2212. * @param[in] n block index in the current MB
  2213. * @param dc_val_ptr Pointer to DC predictor
  2214. * @param dir_ptr Prediction direction for use in AC prediction
  2215. */
  2216. static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  2217. int16_t **dc_val_ptr, int *dir_ptr)
  2218. {
  2219. int a, b, c, wrap, pred, scale;
  2220. int16_t *dc_val;
  2221. static const uint16_t dcpred[32] = {
  2222. -1, 1024, 512, 341, 256, 205, 171, 146, 128,
  2223. 114, 102, 93, 85, 79, 73, 68, 64,
  2224. 60, 57, 54, 51, 49, 47, 45, 43,
  2225. 41, 39, 38, 37, 35, 34, 33
  2226. };
  2227. /* find prediction - wmv3_dc_scale always used here in fact */
  2228. if (n < 4) scale = s->y_dc_scale;
  2229. else scale = s->c_dc_scale;
  2230. wrap = s->block_wrap[n];
  2231. dc_val = s->dc_val[0] + s->block_index[n];
  2232. /* B A
  2233. * C X
  2234. */
  2235. c = dc_val[ - 1];
  2236. b = dc_val[ - 1 - wrap];
  2237. a = dc_val[ - wrap];
  2238. if (pq < 9 || !overlap) {
  2239. /* Set outer values */
  2240. if (s->first_slice_line && (n != 2 && n != 3))
  2241. b = a = dcpred[scale];
  2242. if (s->mb_x == 0 && (n != 1 && n != 3))
  2243. b = c = dcpred[scale];
  2244. } else {
  2245. /* Set outer values */
  2246. if (s->first_slice_line && (n != 2 && n != 3))
  2247. b = a = 0;
  2248. if (s->mb_x == 0 && (n != 1 && n != 3))
  2249. b = c = 0;
  2250. }
  2251. if (abs(a - b) <= abs(b - c)) {
  2252. pred = c;
  2253. *dir_ptr = 1; // left
  2254. } else {
  2255. pred = a;
  2256. *dir_ptr = 0; // top
  2257. }
  2258. /* update predictor */
  2259. *dc_val_ptr = &dc_val[0];
  2260. return pred;
  2261. }
  2262. /** Get predicted DC value
  2263. * prediction dir: left=0, top=1
  2264. * @param s MpegEncContext
  2265. * @param overlap flag indicating that overlap filtering is used
  2266. * @param pq integer part of picture quantizer
  2267. * @param[in] n block index in the current MB
  2268. * @param a_avail flag indicating top block availability
  2269. * @param c_avail flag indicating left block availability
  2270. * @param dc_val_ptr Pointer to DC predictor
  2271. * @param dir_ptr Prediction direction for use in AC prediction
  2272. */
static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                              int a_avail, int c_avail,
                              int16_t **dc_val_ptr, int *dir_ptr)
{
    /* NOTE: the 'overlap' and 'pq' parameters are unused in this variant;
     * they are kept for signature parity with vc1_i_pred_dc(). */
    int a, b, c, wrap, pred;
    int16_t *dc_val;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int q1, q2 = 0;
    int dqscale_index;

    wrap = s->block_wrap[n];
    dc_val = s->dc_val[0] + s->block_index[n];

    /* B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];
    /* scale predictors if needed */
    q1 = s->current_picture.qscale_table[mb_pos];
    dqscale_index = s->y_dc_scale_table[q1] - 1;
    if (dqscale_index < 0)
        return 0;  // invalid quantizer: no usable prediction
    /* Each neighbour may have been coded with a different quantizer;
     * rescale its stored DC to the current one via ff_vc1_dqscale
     * (fixed-point multiply, +0x20000 rounds the >>18). */
    if (c_avail && (n != 1 && n != 3)) {
        /* left neighbour lies in the previous MB */
        q2 = s->current_picture.qscale_table[mb_pos - 1];
        if (q2 && q2 != q1)
            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && (n != 2 && n != 3)) {
        /* top neighbour lies in the MB one row up */
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
        if (q2 && q2 != q1)
            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && c_avail && (n != 3)) {
        /* locate the MB containing the top-left neighbour of block n */
        int off = mb_pos;
        if (n != 1)
            off--;
        if (n != 2)
            off -= s->mb_stride;
        q2 = s->current_picture.qscale_table[off];
        if (q2 && q2 != q1)
            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }

    /* gradient rule when both neighbours exist, else whichever is available */
    if (a_avail && c_avail) {
        if (abs(a - b) <= abs(b - c)) {
            pred     = c;
            *dir_ptr = 1; // left
        } else {
            pred     = a;
            *dir_ptr = 0; // top
        }
    } else if (a_avail) {
        pred     = a;
        *dir_ptr = 0; // top
    } else if (c_avail) {
        pred     = c;
        *dir_ptr = 1; // left
    } else {
        pred     = 0;
        *dir_ptr = 1; // left
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}
  2337. /** @} */ // Block group
  2338. /**
  2339. * @name VC1 Macroblock-level functions in Simple/Main Profiles
  2340. * @see 7.1.4, p91 and 8.1.1.7, p(1)04
  2341. * @{
  2342. */
  2343. static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
  2344. uint8_t **coded_block_ptr)
  2345. {
  2346. int xy, wrap, pred, a, b, c;
  2347. xy = s->block_index[n];
  2348. wrap = s->b8_stride;
  2349. /* B C
  2350. * A X
  2351. */
  2352. a = s->coded_block[xy - 1 ];
  2353. b = s->coded_block[xy - 1 - wrap];
  2354. c = s->coded_block[xy - wrap];
  2355. if (b == c) {
  2356. pred = a;
  2357. } else {
  2358. pred = c;
  2359. }
  2360. /* store value */
  2361. *coded_block_ptr = &s->coded_block[xy];
  2362. return pred;
  2363. }
  2364. /**
  2365. * Decode one AC coefficient
  2366. * @param v The VC1 context
  2367. * @param last Last coefficient
  2368. * @param skip How much zero coefficients to skip
  2369. * @param value Decoded AC coefficient value
  2370. * @param codingset set of VLC to decode data
  2371. * @see 8.1.3.4
  2372. */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        /* regular code: (run, level) straight from the index table;
         * bitstream exhaustion also forces "last" to terminate the block */
        run   = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst   = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        if (get_bits1(gb))
            level = -level;
    } else {
        /* escape code: mode selected by a 1/01/00 prefix */
        escape = decode210(gb);
        if (escape != 2) {
            /* escape modes 0/1: a second table code plus a delta added to
             * the level (mode 0) or to the run (mode 1) */
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run   = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst   = index >= vc1_last_decode_table[codingset];
            if (escape == 0) {
                if (lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if (lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if (get_bits1(gb))
                level = -level;
        } else {
            /* escape mode 2: fixed-length run/level; the field widths are
             * read once per picture and cached in esc3_*_length */
            int sign;

            lst = get_bits1(gb);
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run   = get_bits(gb, v->s.esc3_run_length);
            sign  = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
            if (sign)
                level = -level;
        }
    }

    *last  = lst;
    *skip  = run;
    *value = level;
}
/** Decode intra block in intra frames - should be faster than decode_intra_block
 * @param v VC1Context
 * @param block block to decode (DC+AC coefficients written in place)
 * @param[in] n subblock index (0-3 luma, 4-5 chroma)
 * @param coded are AC coeffs present or not
 * @param codingset set of VLC to decode data
 * @return 0 on success, -1 on an illegal DC VLC
 */
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
                              int coded, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        if (dcdiff == 119 /* ESC index value */) {
            /* escape: read the differential as a fixed-length field whose
             * width depends on the picture quantizer */
            /* TODO: Optimize */
            if (v->pq == 1) dcdiff = get_bits(gb, 10);
            else if (v->pq == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        } else {
            /* low quantizers carry extra refinement bits */
            if (v->pq == 1)
                dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
            else if (v->pq == 2)
                dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction: add predicted DC; dc_pred_dir selects left vs top */
    dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;
    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }
    /* Skip ? */
    if (!coded) {
        goto not_coded;
    }

    // AC Decoding
    i = 1;
    {
        int last = 0, skip, value;
        const uint8_t *zz_table;
        int scale;
        int k;

        scale = v->pq * 2 + v->halfpq;
        /* zigzag table depends on AC prediction and its direction */
        if (v->s.ac_pred) {
            if (!dc_pred_dir)
                zz_table = v->zz_8x8[2];
            else
                zz_table = v->zz_8x8[3];
        } else
            zz_table = v->zz_8x8[1];

        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;
        /* ac_val points at the neighbor's saved AC row/column,
         * ac_val2 at this block's own save slot */
        if (dc_pred_dir) // left
            ac_val -= 16;
        else // top
            ac_val -= 16 * s->block_wrap[n];

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if (s->ac_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++)
                    block[k << v->left_blk_sh] += ac_val[k];
            } else { // top
                for (k = 1; k < 8; k++)
                    block[k << v->top_blk_sh] += ac_val[k + 8];
            }
        }
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }
        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -v->pq : v->pq;
            }

        if (s->ac_pred) i = 63;
    }

not_coded:
    /* no AC coefficients in the bitstream: prediction alone fills the block
     * (the coded path above also falls through here, guarded by !coded) */
    if (!coded) {
        int k, scale;
        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;

        i = 0;
        scale = v->pq * 2 + v->halfpq;
        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            ac_val -= 16;
            if (s->ac_pred)
                memcpy(ac_val2, ac_val, 8 * 2);
        } else { // top
            ac_val -= 16 * s->block_wrap[n];
            if (s->ac_pred)
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
        }

        /* apply AC prediction if needed */
        if (s->ac_pred) {
            if (dc_pred_dir) { //left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
/** Decode intra block in intra frames - advanced-profile variant of
 * vc1_decode_i_block that takes an explicit per-MB quantizer and rescales
 * AC predictors when the neighbor MB used a different quantizer.
 * @param v VC1Context
 * @param block block to decode
 * @param[in] n subblock number (0-3 luma, 4-5 chroma)
 * @param coded are AC coeffs present or not
 * @param codingset set of VLC to decode data
 * @param mquant quantizer value for this macroblock
 * @return 0 on success, -1 on illegal DC VLC, AVERROR_INVALIDDATA on bad quantizer
 */
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
                                  int coded, int codingset, int mquant)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val = NULL;
    int16_t *ac_val, *ac_val2;
    int dcdiff;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int scale;
    int q1, q2 = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        if (dcdiff == 119 /* ESC index value */) {
            /* escape: fixed-length differential, width depends on mquant */
            /* TODO: Optimize */
            if (mquant == 1) dcdiff = get_bits(gb, 10);
            else if (mquant == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        } else {
            /* low quantizers carry extra refinement bits */
            if (mquant == 1)
                dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
            else if (mquant == 2)
                dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }

    //AC Decoding
    i = 1;

    /* check if AC is needed at all */
    if (!a_avail && !c_avail)
        use_pred = 0;
    ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
    ac_val2 = ac_val;

    scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);

    if (dc_pred_dir) // left
        ac_val -= 16;
    else // top
        ac_val -= 16 * s->block_wrap[n];

    /* q1 = this MB's quantizer, q2 = predicting neighbor's quantizer;
     * for intra-MB-internal neighbors (n==1/2/3) both are the same MB */
    q1 = s->current_picture.qscale_table[mb_pos];
    if ( dc_pred_dir && c_avail && mb_pos)
        q2 = s->current_picture.qscale_table[mb_pos - 1];
    if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
    if ( dc_pred_dir && n == 1)
        q2 = q1;
    if (!dc_pred_dir && n == 2)
        q2 = q1;
    if (n == 3)
        q2 = q1;

    if (coded) {
        int last = 0, skip, value;
        const uint8_t *zz_table;
        int k;

        /* zigzag table depends on AC prediction, its direction, and
         * whether the frame is interlaced */
        if (v->s.ac_pred) {
            if (!use_pred && v->fcm == ILACE_FRAME) {
                zz_table = v->zzi_8x8;
            } else {
                if (!dc_pred_dir) // top
                    zz_table = v->zz_8x8[2];
                else // left
                    zz_table = v->zz_8x8[3];
            }
        } else {
            if (v->fcm != ILACE_FRAME)
                zz_table = v->zz_8x8[1];
            else
                zz_table = v->zzi_8x8;
        }

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            /* scale predictors if needed*/
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;

                if (q1 < 1)
                    return AVERROR_INVALIDDATA;
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                } else { // top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            } else {
                if (dc_pred_dir) { //left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += ac_val[k];
                } else { //top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += ac_val[k + 8];
                }
            }
        }
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k ] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -mquant : mquant;
            }

        if (use_pred) i = 63;
    } else { // no AC coeffs
        int k;

        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            if (use_pred) {
                memcpy(ac_val2, ac_val, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        } else { // top
            if (use_pred) {
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val2[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
/** Decode intra block in inter frames - more generic version than vc1_decode_i_block
 * @param v VC1Context
 * @param block block to decode
 * @param[in] n subblock index (0-3 luma, 4-5 chroma)
 * @param coded are AC coeffs present or not
 * @param mquant block quantizer
 * @param codingset set of VLC to decode data
 * @return 0 on success, -1 on illegal DC VLC, AVERROR_INVALIDDATA on bad quantizer
 */
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
                                  int coded, int mquant, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val = NULL;
    int16_t *ac_val, *ac_val2;
    int dcdiff;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int a_avail = v->a_avail, c_avail = v->c_avail;
    int use_pred = s->ac_pred;
    int scale;
    int q1, q2 = 0;

    s->dsp.clear_block(block);

    /* XXX: Guard against dumb values of mquant */
    mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[mquant];
    s->c_dc_scale = s->c_dc_scale_table[mquant];

    /* Get DC differential */
    if (n < 4) {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    } else {
        dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        if (dcdiff == 119 /* ESC index value */) {
            /* escape: fixed-length differential, width depends on mquant */
            /* TODO: Optimize */
            if (mquant == 1) dcdiff = get_bits(gb, 10);
            else if (mquant == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        } else {
            /* low quantizers carry extra refinement bits */
            if (mquant == 1)
                dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
            else if (mquant == 2)
                dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }

    //AC Decoding
    i = 1;

    /* check if AC is needed at all and adjust direction if needed;
     * unlike i_block_adv, here an unavailable neighbor flips the
     * prediction direction instead of just disabling prediction */
    if (!a_avail) dc_pred_dir = 1;
    if (!c_avail) dc_pred_dir = 0;
    if (!a_avail && !c_avail) use_pred = 0;
    ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
    ac_val2 = ac_val;

    scale = mquant * 2 + v->halfpq;

    if (dc_pred_dir) //left
        ac_val -= 16;
    else //top
        ac_val -= 16 * s->block_wrap[n];

    /* q1 = this MB's quantizer, q2 = predicting neighbor's quantizer */
    q1 = s->current_picture.qscale_table[mb_pos];
    if (dc_pred_dir && c_avail && mb_pos)
        q2 = s->current_picture.qscale_table[mb_pos - 1];
    if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
    if ( dc_pred_dir && n == 1)
        q2 = q1;
    if (!dc_pred_dir && n == 2)
        q2 = q1;
    if (n == 3) q2 = q1;

    if (coded) {
        int last = 0, skip, value;
        int k;

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            /* pick the zigzag per frame coding mode (progressive vs
             * interlaced) and AC prediction direction */
            if (v->fcm == PROGRESSIVE)
                block[v->zz_8x8[0][i++]] = value;
            else {
                if (use_pred && (v->fcm == ILACE_FRAME)) {
                    if (!dc_pred_dir) // top
                        block[v->zz_8x8[2][i++]] = value;
                    else // left
                        block[v->zz_8x8[3][i++]] = value;
                } else {
                    block[v->zzi_8x8[i++]] = value;
                }
            }
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            /* scale predictors if needed*/
            if (q2 && q1 != q2) {
                q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;

                if (q1 < 1)
                    return AVERROR_INVALIDDATA;
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                } else { //top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            } else {
                if (dc_pred_dir) { // left
                    for (k = 1; k < 8; k++)
                        block[k << v->left_blk_sh] += ac_val[k];
                } else { // top
                    for (k = 1; k < 8; k++)
                        block[k << v->top_blk_sh] += ac_val[k + 8];
                }
            }
        }
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k ] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -mquant : mquant;
            }

        if (use_pred) i = 63;
    } else { // no AC coeffs
        int k;

        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            if (use_pred) {
                memcpy(ac_val2, ac_val, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        } else { // top
            if (use_pred) {
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
                if (q2 && q1 != q2) {
                    q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
                    q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
                    if (q1 < 1)
                        return AVERROR_INVALIDDATA;
                    for (k = 1; k < 8; k++)
                        ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
                }
            }
        }

        /* apply AC prediction if needed */
        if (use_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val2[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
/** Decode P block
 * Decodes one residual block of a P macroblock: picks the transform type
 * (8x8, two 8x4, two 4x8 or four 4x4), decodes the coefficients for each
 * coded subblock and, unless skip_block is set, applies the inverse
 * transform and adds the result to dst.
 * @param v VC1Context
 * @param block coefficient buffer (cleared on entry)
 * @param[in] n block index within the macroblock
 * @param mquant quantizer for this block
 * @param ttmb macroblock transform type, or -1 to read a per-block type
 * @param first_block true for the first coded block of the MB
 * @param dst destination pixels
 * @param linesize destination stride
 * @param skip_block if set, decode coefficients but do not reconstruct
 * @param[out] ttmb_out if non-NULL, the chosen transform type is ORed in at (n*4)
 * @return coded-subblock pattern (4 bits) for the loop filter
 */
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
                              int mquant, int ttmb, int first_block,
                              uint8_t *dst, int linesize, int skip_block,
                              int *ttmb_out)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int subblkpat = 0;
    int scale, off, idx, last, skip, value;
    int ttblk = ttmb & 7;
    int pat = 0;

    s->dsp.clear_block(block);

    if (ttmb == -1) {
        ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
    }
    if (ttblk == TT_4X4) {
        subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
    }
    if ((ttblk != TT_8X8 && ttblk != TT_4X4)
        && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
            || (!v->res_rtm_flag && !first_block))) {
        subblkpat = decode012(gb);
        if (subblkpat)
            subblkpat ^= 3; // swap decoded pattern bits
        if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
            ttblk = TT_8X4;
        if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
            ttblk = TT_4X8;
    }
    scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);

    // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
    if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
        subblkpat = 2 - (ttblk == TT_8X4_TOP);
        ttblk     = TT_8X4;
    }
    if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
        subblkpat = 2 - (ttblk == TT_4X8_LEFT);
        ttblk     = TT_4X8;
    }
    switch (ttblk) {
    case TT_8X8:
        pat  = 0xF;
        i    = 0;
        last = 0;
        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
            i += skip;
            if (i > 63)
                break;
            /* interlaced frames use a different zigzag */
            if (!v->fcm)
                idx = v->zz_8x8[0][i++];
            else
                idx = v->zzi_8x8[i++];
            block[idx] = value * scale;
            if (!v->pquantizer)
                block[idx] += (block[idx] < 0) ? -mquant : mquant;
        }
        if (!skip_block) {
            /* i == 1 means only the DC coefficient was coded */
            if (i == 1)
                v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
            else {
                v->vc1dsp.vc1_inv_trans_8x8(block);
                s->dsp.add_pixels_clamped(block, dst, linesize);
            }
        }
        break;
    case TT_4X4:
        pat = ~subblkpat & 0xF;
        for (j = 0; j < 4; j++) {
            last = subblkpat & (1 << (3 - j)); // subblock not coded -> skip its loop
            i    = 0;
            off  = (j & 1) * 4 + (j & 2) * 16; // coefficient offset of subblock j
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 15)
                    break;
                if (!v->fcm)
                    idx = ff_vc1_simple_progressive_4x4_zz[i++];
                else
                    idx = ff_vc1_adv_interlaced_4x4_zz[i++];
                block[idx + off] = value * scale;
                if (!v->pquantizer)
                    block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
            }
        }
        break;
    case TT_8X4:
        pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i    = 0;
            off  = j * 32; // top/bottom half of the coefficient buffer
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 31)
                    break;
                if (!v->fcm)
                    idx = v->zz_8x4[i++] + off;
                else
                    idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
                block[idx] = value * scale;
                if (!v->pquantizer)
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
            }
        }
        break;
    case TT_4X8:
        pat = ~(subblkpat * 5) & 0xF;
        for (j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i    = 0;
            off  = j * 4; // left/right half of the coefficient buffer
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if (i > 31)
                    break;
                if (!v->fcm)
                    idx = v->zz_4x8[i++] + off;
                else
                    idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
                block[idx] = value * scale;
                if (!v->pquantizer)
                    block[idx] += (block[idx] < 0) ? -mquant : mquant;
            }
            if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
                if (i == 1)
                    v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
                else
                    v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
            }
        }
        break;
    }
    if (ttmb_out)
        *ttmb_out |= ttblk << (n * 4);
    return pat;
}
  3116. /** @} */ // Macroblock group
/* bit-length / offset tables for MV differential decoding
 * (presumably consumed by the GET_MVDATA macro used in vc1_decode_p_mb —
 *  verify against the macro definition) */
static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
/** Apply the in-loop filter to the vertical (horizontal-edge) boundaries of
 * one block of the previously decoded macroblock row.
 * NOTE(review): cbp/is_intra/ttblk are indexed by s->mb_x - s->mb_stride —
 * they appear to be ring buffers holding the prior MB row; confirm against
 * their allocation.
 * @param v VC1Context
 * @param block_num block index (0-3 luma, 4-5 chroma)
 */
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
{
    MpegEncContext *s = &v->s;
    int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
        block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
        mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
        block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
    int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
    uint8_t *dst;

    if (block_num > 3) {
        dst = s->dest[block_num - 3];
    } else {
        dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
    }

    /* filter the edge shared with the block below, unless we are on the
     * bottom MB row (chroma blocks 4/5 and luma 2/3 have no lower neighbor
     * there) */
    if (s->mb_y != s->end_mb_y || block_num < 2) {
        int16_t (*mv)[2];
        int mv_stride;

        if (block_num > 3) {
            bottom_cbp      = v->cbp[s->mb_x] >> (block_num * 4);
            bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
            mv              = &v->luma_mv[s->mb_x - s->mb_stride];
            mv_stride       = s->mb_stride;
        } else {
            /* blocks 0/1 border blocks 2/3 of the same MB; blocks 2/3
             * border blocks 0/1 of the MB below */
            bottom_cbp      = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
                                              : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
            bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
                                              : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
            mv_stride       = s->b8_stride;
            mv              = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
        }

        /* always filter across intra blocks or differing MVs; otherwise
         * filter only the coded 4-pixel halves of the edge */
        if (bottom_is_intra & 1 || block_is_intra & 1 ||
            mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
            v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
        } else {
            idx = ((bottom_cbp >> 2) | block_cbp) & 3;
            if (idx == 3) {
                v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
            } else if (idx) {
                if (idx == 1)
                    v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
                else
                    v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
            }
        }
    }

    /* internal subblock edge: only present for 4x4 / 8x4 transforms */
    dst -= 4 * linesize;
    ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
    if (ttblk == TT_4X4 || ttblk == TT_8X4) {
        idx = (block_cbp | (block_cbp >> 2)) & 3;
        if (idx == 3) {
            v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
        } else if (idx) {
            if (idx == 1)
                v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
            else
                v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
        }
    }
}
/** Apply the in-loop filter to the horizontal (vertical-edge) boundaries of
 * one block; operates one MB to the left and one row above the current
 * position (see vc1_apply_p_loop_filter for the V-before-H ordering).
 * @param v VC1Context
 * @param block_num block index (0-3 luma, 4-5 chroma)
 */
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
{
    MpegEncContext *s = &v->s;
    int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
        block_cbp = mb_cbp >> (block_num * 4), right_cbp,
        mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
        block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
    int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
    uint8_t *dst;

    if (block_num > 3) {
        dst = s->dest[block_num - 3] - 8 * linesize;
    } else {
        dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
    }

    /* filter the edge shared with the block to the right, unless we are at
     * the right picture edge (only blocks 0 and 2 — !(block_num & 5) —
     * still have a right neighbor inside the MB there) */
    if (s->mb_x != s->mb_width || !(block_num & 5)) {
        int16_t (*mv)[2];

        if (block_num > 3) {
            right_cbp      = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
            right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
            mv             = &v->luma_mv[s->mb_x - s->mb_stride - 1];
        } else {
            /* odd blocks border the next MB; even blocks border the odd
             * block of the same MB */
            right_cbp      = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
                                             : (mb_cbp >> ((block_num + 1) * 4));
            right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
                                             : (mb_is_intra >> ((block_num + 1) * 4));
            mv             = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
        }
        if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
            v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
        } else {
            idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
            if (idx == 5) {
                v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
            } else if (idx) {
                if (idx == 1)
                    v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
                else
                    v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
            }
        }
    }

    /* internal subblock edge: only present for 4x4 / 4x8 transforms */
    dst -= 4;
    ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
    if (ttblk == TT_4X4 || ttblk == TT_4X8) {
        idx = (block_cbp | (block_cbp >> 1)) & 5;
        if (idx == 5) {
            v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
        } else if (idx) {
            if (idx == 1)
                v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
            else
                v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
        }
    }
}
  3233. static void vc1_apply_p_loop_filter(VC1Context *v)
  3234. {
  3235. MpegEncContext *s = &v->s;
  3236. int i;
  3237. for (i = 0; i < 6; i++) {
  3238. vc1_apply_p_v_loop_filter(v, i);
  3239. }
  3240. /* V always precedes H, therefore we run H one MB before V;
  3241. * at the end of a row, we catch up to complete the row */
  3242. if (s->mb_x) {
  3243. for (i = 0; i < 6; i++) {
  3244. vc1_apply_p_h_loop_filter(v, i);
  3245. }
  3246. if (s->mb_x == s->mb_width - 1) {
  3247. s->mb_x++;
  3248. ff_update_block_index(s);
  3249. for (i = 0; i < 6; i++) {
  3250. vc1_apply_p_h_loop_filter(v, i);
  3251. }
  3252. }
  3253. }
  3254. }
  3255. /** Decode one P-frame MB
  3256. */
  3257. static int vc1_decode_p_mb(VC1Context *v)
  3258. {
  3259. MpegEncContext *s = &v->s;
  3260. GetBitContext *gb = &s->gb;
  3261. int i, j;
  3262. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  3263. int cbp; /* cbp decoding stuff */
  3264. int mqdiff, mquant; /* MB quantization */
  3265. int ttmb = v->ttfrm; /* MB Transform type */
  3266. int mb_has_coeffs = 1; /* last_flag */
  3267. int dmv_x, dmv_y; /* Differential MV components */
  3268. int index, index1; /* LUT indexes */
  3269. int val, sign; /* temp values */
  3270. int first_block = 1;
  3271. int dst_idx, off;
  3272. int skipped, fourmv;
  3273. int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
  3274. mquant = v->pq; /* lossy initialization */
  3275. if (v->mv_type_is_raw)
  3276. fourmv = get_bits1(gb);
  3277. else
  3278. fourmv = v->mv_type_mb_plane[mb_pos];
  3279. if (v->skip_is_raw)
  3280. skipped = get_bits1(gb);
  3281. else
  3282. skipped = v->s.mbskip_table[mb_pos];
  3283. if (!fourmv) { /* 1MV mode */
  3284. if (!skipped) {
  3285. GET_MVDATA(dmv_x, dmv_y);
  3286. if (s->mb_intra) {
  3287. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  3288. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  3289. }
  3290. s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
  3291. vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  3292. /* FIXME Set DC val for inter block ? */
  3293. if (s->mb_intra && !mb_has_coeffs) {
  3294. GET_MQUANT();
  3295. s->ac_pred = get_bits1(gb);
  3296. cbp = 0;
  3297. } else if (mb_has_coeffs) {
  3298. if (s->mb_intra)
  3299. s->ac_pred = get_bits1(gb);
  3300. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  3301. GET_MQUANT();
  3302. } else {
  3303. mquant = v->pq;
  3304. cbp = 0;
  3305. }
  3306. s->current_picture.qscale_table[mb_pos] = mquant;
  3307. if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
  3308. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
  3309. VC1_TTMB_VLC_BITS, 2);
  3310. if (!s->mb_intra) vc1_mc_1mv(v, 0);
  3311. dst_idx = 0;
  3312. for (i = 0; i < 6; i++) {
  3313. s->dc_val[0][s->block_index[i]] = 0;
  3314. dst_idx += i >> 2;
  3315. val = ((cbp >> (5 - i)) & 1);
  3316. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  3317. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  3318. if (s->mb_intra) {
  3319. /* check if prediction blocks A and C are available */
  3320. v->a_avail = v->c_avail = 0;
  3321. if (i == 2 || i == 3 || !s->first_slice_line)
  3322. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  3323. if (i == 1 || i == 3 || s->mb_x)
  3324. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  3325. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  3326. (i & 4) ? v->codingset2 : v->codingset);
  3327. if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
  3328. continue;
  3329. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  3330. if (v->rangeredfrm)
  3331. for (j = 0; j < 64; j++)
  3332. s->block[i][j] <<= 1;
  3333. s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  3334. if (v->pq >= 9 && v->overlap) {
  3335. if (v->c_avail)
  3336. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  3337. if (v->a_avail)
  3338. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  3339. }
  3340. block_cbp |= 0xF << (i << 2);
  3341. block_intra |= 1 << i;
  3342. } else if (val) {
  3343. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
  3344. s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
  3345. (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
  3346. block_cbp |= pat << (i << 2);
  3347. if (!v->ttmbf && ttmb < 8)
  3348. ttmb = -1;
  3349. first_block = 0;
  3350. }
  3351. }
  3352. } else { // skipped
  3353. s->mb_intra = 0;
  3354. for (i = 0; i < 6; i++) {
  3355. v->mb_type[0][s->block_index[i]] = 0;
  3356. s->dc_val[0][s->block_index[i]] = 0;
  3357. }
  3358. s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
  3359. s->current_picture.qscale_table[mb_pos] = 0;
  3360. vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  3361. vc1_mc_1mv(v, 0);
  3362. }
  3363. } else { // 4MV mode
  3364. if (!skipped /* unskipped MB */) {
  3365. int intra_count = 0, coded_inter = 0;
  3366. int is_intra[6], is_coded[6];
  3367. /* Get CBPCY */
  3368. cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  3369. for (i = 0; i < 6; i++) {
  3370. val = ((cbp >> (5 - i)) & 1);
  3371. s->dc_val[0][s->block_index[i]] = 0;
  3372. s->mb_intra = 0;
  3373. if (i < 4) {
  3374. dmv_x = dmv_y = 0;
  3375. s->mb_intra = 0;
  3376. mb_has_coeffs = 0;
  3377. if (val) {
  3378. GET_MVDATA(dmv_x, dmv_y);
  3379. }
  3380. vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  3381. if (!s->mb_intra)
  3382. vc1_mc_4mv_luma(v, i, 0, 0);
  3383. intra_count += s->mb_intra;
  3384. is_intra[i] = s->mb_intra;
  3385. is_coded[i] = mb_has_coeffs;
  3386. }
  3387. if (i & 4) {
  3388. is_intra[i] = (intra_count >= 3);
  3389. is_coded[i] = val;
  3390. }
  3391. if (i == 4)
  3392. vc1_mc_4mv_chroma(v, 0);
  3393. v->mb_type[0][s->block_index[i]] = is_intra[i];
  3394. if (!coded_inter)
  3395. coded_inter = !is_intra[i] & is_coded[i];
  3396. }
  3397. // if there are no coded blocks then don't do anything more
  3398. dst_idx = 0;
  3399. if (!intra_count && !coded_inter)
  3400. goto end;
  3401. GET_MQUANT();
  3402. s->current_picture.qscale_table[mb_pos] = mquant;
  3403. /* test if block is intra and has pred */
  3404. {
  3405. int intrapred = 0;
  3406. for (i = 0; i < 6; i++)
  3407. if (is_intra[i]) {
  3408. if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
  3409. || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
  3410. intrapred = 1;
  3411. break;
  3412. }
  3413. }
  3414. if (intrapred)
  3415. s->ac_pred = get_bits1(gb);
  3416. else
  3417. s->ac_pred = 0;
  3418. }
  3419. if (!v->ttmbf && coded_inter)
  3420. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  3421. for (i = 0; i < 6; i++) {
  3422. dst_idx += i >> 2;
  3423. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  3424. s->mb_intra = is_intra[i];
  3425. if (is_intra[i]) {
  3426. /* check if prediction blocks A and C are available */
  3427. v->a_avail = v->c_avail = 0;
  3428. if (i == 2 || i == 3 || !s->first_slice_line)
  3429. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  3430. if (i == 1 || i == 3 || s->mb_x)
  3431. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  3432. vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
  3433. (i & 4) ? v->codingset2 : v->codingset);
  3434. if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
  3435. continue;
  3436. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  3437. if (v->rangeredfrm)
  3438. for (j = 0; j < 64; j++)
  3439. s->block[i][j] <<= 1;
  3440. s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
  3441. (i & 4) ? s->uvlinesize : s->linesize);
  3442. if (v->pq >= 9 && v->overlap) {
  3443. if (v->c_avail)
  3444. v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  3445. if (v->a_avail)
  3446. v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
  3447. }
  3448. block_cbp |= 0xF << (i << 2);
  3449. block_intra |= 1 << i;
  3450. } else if (is_coded[i]) {
  3451. pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  3452. first_block, s->dest[dst_idx] + off,
  3453. (i & 4) ? s->uvlinesize : s->linesize,
  3454. (i & 4) && (s->flags & CODEC_FLAG_GRAY),
  3455. &block_tt);
  3456. block_cbp |= pat << (i << 2);
  3457. if (!v->ttmbf && ttmb < 8)
  3458. ttmb = -1;
  3459. first_block = 0;
  3460. }
  3461. }
  3462. } else { // skipped MB
  3463. s->mb_intra = 0;
  3464. s->current_picture.qscale_table[mb_pos] = 0;
  3465. for (i = 0; i < 6; i++) {
  3466. v->mb_type[0][s->block_index[i]] = 0;
  3467. s->dc_val[0][s->block_index[i]] = 0;
  3468. }
  3469. for (i = 0; i < 4; i++) {
  3470. vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
  3471. vc1_mc_4mv_luma(v, i, 0, 0);
  3472. }
  3473. vc1_mc_4mv_chroma(v, 0);
  3474. s->current_picture.qscale_table[mb_pos] = 0;
  3475. }
  3476. }
  3477. end:
  3478. v->cbp[s->mb_x] = block_cbp;
  3479. v->ttblk[s->mb_x] = block_tt;
  3480. v->is_intra[s->mb_x] = block_intra;
  3481. return 0;
  3482. }
/**
 * Decode one macroblock in an interlaced frame P picture.
 *
 * Parses SKIPMB, the MBMODE VLC, per-block motion vector data, CBPCY and
 * the quantizer, then either decodes all six blocks as intra or performs
 * motion compensation (1MV / 2 field MVs / 4MV) and adds the residual.
 * Updates per-MB bookkeeping (mb_type, blk_mv_type, is_intra, qscale_table,
 * cbp/ttblk tables via block_cbp/block_tt).
 *
 * @param v VC-1 decoder context (bit reader state in v->s.gb)
 * @return always 0 (no error paths here)
 */
static int vc1_decode_p_mb_intfr(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0;            /* cbp decoding stuff */
    int mqdiff, mquant;     /* MB quantization */
    int ttmb = v->ttfrm;    /* MB Transform type */
    int mb_has_coeffs = 1;  /* last_flag */
    int dmv_x, dmv_y;       /* Differential MV components */
    int val;                /* temp value */
    int first_block = 1;
    int dst_idx, off;
    int skipped, fourmv = 0, twomv = 0;
    int block_cbp = 0, pat, block_tt = 0;
    int idx_mbmode = 0, mvbp;
    int stride_y, fieldtx;

    mquant = v->pq; /* Lossy initialization */

    /* SKIPMB: raw bit in the MB layer, or a pre-decoded bitplane */
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];
    if (!skipped) {
        /* MBMODE uses a different VLC table when 4MVSWITCH is set */
        if (v->fourmvswitch)
            idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
        else
            idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
        switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
        /* store the motion vector type in a flag (useful later)
         * blk_mv_type: 0 = frame MV, 1 = field MV for each luma block */
        case MV_PMODE_INTFR_4MV:
            fourmv = 1;
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            break;
        case MV_PMODE_INTFR_4MV_FIELD:
            fourmv = 1;
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
            break;
        case MV_PMODE_INTFR_2MV_FIELD:
            twomv = 1;
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
            break;
        case MV_PMODE_INTFR_1MV:
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            break;
        }
        if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
            /* clear the forward MVs so later prediction sees zeros here */
            for (i = 0; i < 4; i++) {
                s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
                s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
            }
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->mb_intra = v->is_intra[s->mb_x] = 1;
            for (i = 0; i < 6; i++)
                v->mb_type[0][s->block_index[i]] = 1;
            fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
            mb_has_coeffs = get_bits1(gb);
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same (not sure if necessary here) */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];
            dst_idx = 0;
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                v->mb_type[0][s->block_index[i]] = s->mb_intra;
                /* check if prediction blocks A (above) and C (left) are available */
                v->a_avail = v->c_avail = 0;
                if (i == 2 || i == 3 || !s->first_slice_line)
                    v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
                if (i == 1 || i == 3 || s->mb_x)
                    v->c_avail = v->mb_type[0][s->block_index[i] - 1];
                vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                       (i & 4) ? v->codingset2 : v->codingset);
                /* skip chroma reconstruction in grayscale-only mode */
                if ((i > 3) && (s->flags & CODEC_FLAG_GRAY)) continue;
                v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
                if (i < 4) {
                    /* FIELDTX doubles the luma write stride (field lines are
                     * interleaved within the MB — see off computation) */
                    stride_y = s->linesize << fieldtx;
                    off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
                } else {
                    stride_y = s->uvlinesize;
                    off = 0;
                }
                s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
                //TODO: loop filter
            }
        } else { // inter MB
            mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            /* read the MV block pattern telling which differential MVs follow */
            if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
                v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
            } else {
                if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
                    || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
                    v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
                }
            }
            s->mb_intra = v->is_intra[s->mb_x] = 0;
            for (i = 0; i < 6; i++)
                v->mb_type[0][s->block_index[i]] = 0;
            fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
            /* for all motion vector read MVDATA and motion compensate each block */
            dst_idx = 0;
            if (fourmv) {
                /* 4MV: one MV per luma block, chroma derived from all four */
                mvbp = v->fourmvbp;
                for (i = 0; i < 6; i++) {
                    if (i < 4) {
                        dmv_x = dmv_y = 0;
                        val = ((mvbp >> (3 - i)) & 1);
                        if (val) {
                            get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                        }
                        vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
                        vc1_mc_4mv_luma(v, i, 0, 0);
                    } else if (i == 4) {
                        vc1_mc_4mv_chroma4(v, 0, 0, 0);
                    }
                }
            } else if (twomv) {
                /* 2 field MVs: first MV drives blocks 0/1, second drives 2/3 */
                mvbp = v->twomvbp;
                dmv_x = dmv_y = 0;
                if (mvbp & 2) {
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                }
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
                vc1_mc_4mv_luma(v, 0, 0, 0);
                vc1_mc_4mv_luma(v, 1, 0, 0);
                dmv_x = dmv_y = 0;
                if (mvbp & 1) {
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                }
                vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
                vc1_mc_4mv_luma(v, 2, 0, 0);
                vc1_mc_4mv_luma(v, 3, 0, 0);
                vc1_mc_4mv_chroma4(v, 0, 0, 0);
            } else {
                /* 1MV: a single vector for the whole macroblock */
                mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
                dmv_x = dmv_y = 0;
                if (mvbp) {
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                }
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
                vc1_mc_1mv(v, 0);
            }
            if (cbp)
                GET_MQUANT(); // p. 227
            s->current_picture.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && cbp)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            /* decode the residual for each coded block and add it in */
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                if (!fieldtx)
                    off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                else
                    off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
                if (val) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                             first_block, s->dest[dst_idx] + off,
                                             (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
                    block_cbp |= pat << (i << 2);
                    /* non-8x8 signalled per-MB transform applies only once */
                    if (!v->ttmbf && ttmb < 8)
                        ttmb = -1;
                    first_block = 0;
                }
            }
        }
    } else { // skipped
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        for (i = 0; i < 6; i++) {
            v->mb_type[0][s->block_index[i]] = 0;
            s->dc_val[0][s->block_index[i]] = 0;
        }
        s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
        s->current_picture.qscale_table[mb_pos] = 0;
        v->blk_mv_type[s->block_index[0]] = 0;
        v->blk_mv_type[s->block_index[1]] = 0;
        v->blk_mv_type[s->block_index[2]] = 0;
        v->blk_mv_type[s->block_index[3]] = 0;
        /* skipped MB: predicted 1MV with zero differential, then MC */
        vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
        vc1_mc_1mv(v, 0);
    }
    /* at the end of a row, save this row's intra flags for the next row */
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
    return 0;
}
/**
 * Decode one macroblock in an interlaced field P picture.
 *
 * Parses the MBMODE VLC, then either decodes the MB as intra (all six
 * blocks) or reads 1MV/4MV motion data, performs motion compensation,
 * and decodes the residual for each coded block.
 *
 * @param v VC-1 decoder context (bit reader state in v->s.gb)
 * @return always 0 (no error paths here)
 */
static int vc1_decode_p_mb_intfi(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0;            /* cbp decoding stuff */
    int mqdiff, mquant;     /* MB quantization */
    int ttmb = v->ttfrm;    /* MB Transform type */
    int mb_has_coeffs = 1;  /* last_flag */
    int dmv_x, dmv_y;       /* Differential MV components */
    int val;                /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int pred_flag = 0;
    int block_cbp = 0, pat, block_tt = 0;
    int idx_mbmode = 0;

    mquant = v->pq; /* Lossy initialization */

    idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
    if (idx_mbmode <= 1) { // intra MB
        s->mb_intra = v->is_intra[s->mb_x] = 1;
        /* clear the stored MV for this MB (field offset via blocks_off) */
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
        s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
        GET_MQUANT();
        s->current_picture.qscale_table[mb_pos] = mquant;
        /* Set DC scale - y and c use the same (not sure if necessary here) */
        s->y_dc_scale = s->y_dc_scale_table[mquant];
        s->c_dc_scale = s->c_dc_scale_table[mquant];
        v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
        /* low bit of the mode index signals whether a CBPCY follows */
        mb_has_coeffs = idx_mbmode & 1;
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            v->mb_type[0][s->block_index[i]] = 1;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            /* check if prediction blocks A (above) and C (left) are available */
            v->a_avail = v->c_avail = 0;
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];
            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            /* skip chroma reconstruction in grayscale-only mode */
            if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
            s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
            // TODO: loop filter
        }
    } else {
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
        for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
        if (idx_mbmode <= 5) { // 1-MV
            dmv_x = dmv_y = pred_flag = 0;
            if (idx_mbmode & 1) {
                get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
            }
            vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
            vc1_mc_1mv(v, 0);
            mb_has_coeffs = !(idx_mbmode & 2);
        } else { // 4-MV
            v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
            for (i = 0; i < 6; i++) {
                if (i < 4) {
                    dmv_x = dmv_y = pred_flag = 0;
                    /* MSB-first: bit (3 - i) says whether block i has MV data */
                    val = ((v->fourmvbp >> (3 - i)) & 1);
                    if (val) {
                        get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
                    }
                    vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
                    vc1_mc_4mv_luma(v, i, 0, 0);
                } else if (i == 4)
                    vc1_mc_4mv_chroma(v, 0);
            }
            mb_has_coeffs = idx_mbmode & 1;
        }
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        if (cbp) {
            GET_MQUANT();
        }
        s->current_picture.qscale_table[mb_pos] = mquant;
        if (!v->ttmbf && cbp) {
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
        /* decode the residual for each coded block and add it in */
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
            if (val) {
                pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                         first_block, s->dest[dst_idx] + off,
                                         (i & 4) ? s->uvlinesize : s->linesize,
                                         (i & 4) && (s->flags & CODEC_FLAG_GRAY),
                                         &block_tt);
                block_cbp |= pat << (i << 2);
                /* per-MB signalled transform applies only to the first block */
                if (!v->ttmbf && ttmb < 8) ttmb = -1;
                first_block = 0;
            }
        }
    }
    /* at the end of a row, save this row's intra flags for the next row */
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
    return 0;
}
/**
 * Decode one B-frame MB (in Main profile).
 *
 * Reads direct/skip flags (raw bits or bitplanes), B MV type, motion
 * vector data, CBPCY and quantizer, then motion-compensates (forward,
 * backward, interpolated or direct) and decodes the residual, or decodes
 * the MB as intra.  Several early-return paths handle effectively
 * skipped macroblocks.
 *
 * @param v VC-1 decoder context (bit reader state in v->s.gb)
 */
static void vc1_decode_b_mb(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0;            /* cbp decoding stuff */
    int mqdiff, mquant;     /* MB quantization */
    int ttmb = v->ttfrm;    /* MB Transform type */
    int mb_has_coeffs = 0;  /* last_flag */
    int index, index1;      /* LUT indexes */
    int val, sign;          /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int skipped, direct;
    int dmv_x[2], dmv_y[2]; /* [0] = forward, [1] = backward differential MV */
    int bmvtype = BMV_TYPE_BACKWARD;

    mquant = v->pq; /* lossy initialization */
    s->mb_intra = 0;

    /* DIRECTMB and SKIPMB: raw bits in the MB layer or pre-decoded bitplanes */
    if (v->dmb_is_raw)
        direct = get_bits1(gb);
    else
        direct = v->direct_mb_plane[mb_pos];
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
    for (i = 0; i < 6; i++) {
        v->mb_type[0][s->block_index[i]] = 0;
        s->dc_val[0][s->block_index[i]] = 0;
    }
    s->current_picture.qscale_table[mb_pos] = 0;

    if (!direct) {
        if (!skipped) {
            /* NOTE(review): GET_MVDATA also updates s->mb_intra and
             * mb_has_coeffs as side effects — see the macro definition
             * earlier in this file. */
            GET_MVDATA(dmv_x[0], dmv_y[0]);
            dmv_x[1] = dmv_x[0];
            dmv_y[1] = dmv_y[0];
        }
        if (skipped || !s->mb_intra) {
            /* BMV type: 0/1 meaning flips depending on BFRACTION half */
            bmvtype = decode012(gb);
            switch (bmvtype) {
            case 0:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
                break;
            case 1:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                break;
            case 2:
                bmvtype = BMV_TYPE_INTERPOLATED;
                dmv_x[0] = dmv_y[0] = 0;
            }
        }
    }
    for (i = 0; i < 6; i++)
        v->mb_type[0][s->block_index[i]] = s->mb_intra;

    if (skipped) {
        /* skipped MB: predict only, no residual */
        if (direct)
            bmvtype = BMV_TYPE_INTERPOLATED;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
        return;
    }
    if (direct) {
        cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        GET_MQUANT();
        s->mb_intra = 0;
        s->current_picture.qscale_table[mb_pos] = mquant;
        if (!v->ttmbf)
            ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        /* direct mode: MVs are derived, no differentials from the stream */
        dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
    } else {
        if (!mb_has_coeffs && !s->mb_intra) {
            /* no coded blocks - effectively skipped */
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            return;
        }
        if (s->mb_intra && !mb_has_coeffs) {
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            s->ac_pred = get_bits1(gb);
            cbp = 0;
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        } else {
            if (bmvtype == BMV_TYPE_INTERPOLATED) {
                /* interpolated mode carries a second MV differential */
                GET_MVDATA(dmv_x[0], dmv_y[0]);
                if (!mb_has_coeffs) {
                    /* interpolated skipped block */
                    vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
                    vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
                    return;
                }
            }
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            if (!s->mb_intra) {
                vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            }
            if (s->mb_intra)
                s->ac_pred = get_bits1(gb);
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
    }
    /* reconstruct all six blocks (intra or coded residual) */
    dst_idx = 0;
    for (i = 0; i < 6; i++) {
        s->dc_val[0][s->block_index[i]] = 0;
        dst_idx += i >> 2;
        val = ((cbp >> (5 - i)) & 1);
        off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
        v->mb_type[0][s->block_index[i]] = s->mb_intra;
        if (s->mb_intra) {
            /* check if prediction blocks A and C are available */
            v->a_avail = v->c_avail = 0;
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];
            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            /* skip chroma reconstruction in grayscale-only mode */
            if ((i > 3) && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            /* undo range reduction of the reference frame */
            if (v->rangeredfrm)
                for (j = 0; j < 64; j++)
                    s->block[i][j] <<= 1;
            s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
        } else if (val) {
            vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                               first_block, s->dest[dst_idx] + off,
                               (i & 4) ? s->uvlinesize : s->linesize,
                               (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
            /* per-MB signalled transform applies only to the first block */
            if (!v->ttmbf && ttmb < 8)
                ttmb = -1;
            first_block = 0;
        }
    }
}
  3946. /** Decode one B-frame MB (in interlaced field B picture)
  3947. */
  3948. static void vc1_decode_b_mb_intfi(VC1Context *v)
  3949. {
  3950. MpegEncContext *s = &v->s;
  3951. GetBitContext *gb = &s->gb;
  3952. int i, j;
  3953. int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
  3954. int cbp = 0; /* cbp decoding stuff */
  3955. int mqdiff, mquant; /* MB quantization */
  3956. int ttmb = v->ttfrm; /* MB Transform type */
  3957. int mb_has_coeffs = 0; /* last_flag */
  3958. int val; /* temp value */
  3959. int first_block = 1;
  3960. int dst_idx, off;
  3961. int fwd;
  3962. int dmv_x[2], dmv_y[2], pred_flag[2];
  3963. int bmvtype = BMV_TYPE_BACKWARD;
  3964. int idx_mbmode;
  3965. mquant = v->pq; /* Lossy initialization */
  3966. s->mb_intra = 0;
  3967. idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
  3968. if (idx_mbmode <= 1) { // intra MB
  3969. s->mb_intra = v->is_intra[s->mb_x] = 1;
  3970. s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
  3971. s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
  3972. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
  3973. GET_MQUANT();
  3974. s->current_picture.qscale_table[mb_pos] = mquant;
  3975. /* Set DC scale - y and c use the same (not sure if necessary here) */
  3976. s->y_dc_scale = s->y_dc_scale_table[mquant];
  3977. s->c_dc_scale = s->c_dc_scale_table[mquant];
  3978. v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
  3979. mb_has_coeffs = idx_mbmode & 1;
  3980. if (mb_has_coeffs)
  3981. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
  3982. dst_idx = 0;
  3983. for (i = 0; i < 6; i++) {
  3984. s->dc_val[0][s->block_index[i]] = 0;
  3985. dst_idx += i >> 2;
  3986. val = ((cbp >> (5 - i)) & 1);
  3987. v->mb_type[0][s->block_index[i]] = s->mb_intra;
  3988. v->a_avail = v->c_avail = 0;
  3989. if (i == 2 || i == 3 || !s->first_slice_line)
  3990. v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
  3991. if (i == 1 || i == 3 || s->mb_x)
  3992. v->c_avail = v->mb_type[0][s->block_index[i] - 1];
  3993. vc1_decode_intra_block(v, s->block[i], i, val, mquant,
  3994. (i & 4) ? v->codingset2 : v->codingset);
  3995. if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
  3996. continue;
  3997. v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
  3998. if (v->rangeredfrm)
  3999. for (j = 0; j < 64; j++)
  4000. s->block[i][j] <<= 1;
  4001. off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
  4002. s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
  4003. // TODO: yet to perform loop filter
  4004. }
  4005. } else {
  4006. s->mb_intra = v->is_intra[s->mb_x] = 0;
  4007. s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
  4008. for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
  4009. if (v->fmb_is_raw)
  4010. fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
  4011. else
  4012. fwd = v->forward_mb_plane[mb_pos];
  4013. if (idx_mbmode <= 5) { // 1-MV
  4014. int interpmvp = 0;
  4015. dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
  4016. pred_flag[0] = pred_flag[1] = 0;
  4017. if (fwd)
  4018. bmvtype = BMV_TYPE_FORWARD;
  4019. else {
  4020. bmvtype = decode012(gb);
  4021. switch (bmvtype) {
  4022. case 0:
  4023. bmvtype = BMV_TYPE_BACKWARD;
  4024. break;
  4025. case 1:
  4026. bmvtype = BMV_TYPE_DIRECT;
  4027. break;
  4028. case 2:
  4029. bmvtype = BMV_TYPE_INTERPOLATED;
  4030. interpmvp = get_bits1(gb);
  4031. }
  4032. }
  4033. v->bmvtype = bmvtype;
  4034. if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
  4035. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  4036. }
  4037. if (interpmvp) {
  4038. get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
  4039. }
  4040. if (bmvtype == BMV_TYPE_DIRECT) {
  4041. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  4042. dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
  4043. if (!s->next_picture_ptr->field_picture) {
  4044. av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
  4045. return;
  4046. }
  4047. }
  4048. vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
  4049. vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
  4050. mb_has_coeffs = !(idx_mbmode & 2);
  4051. } else { // 4-MV
  4052. if (fwd)
  4053. bmvtype = BMV_TYPE_FORWARD;
  4054. v->bmvtype = bmvtype;
  4055. v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
  4056. for (i = 0; i < 6; i++) {
  4057. if (i < 4) {
  4058. dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
  4059. dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
  4060. val = ((v->fourmvbp >> (3 - i)) & 1);
  4061. if (val) {
  4062. get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
  4063. &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
  4064. &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
  4065. }
  4066. vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
  4067. vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
  4068. } else if (i == 4)
  4069. vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
  4070. }
  4071. mb_has_coeffs = idx_mbmode & 1;
  4072. }
  4073. if (mb_has_coeffs)
  4074. cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
  4075. if (cbp) {
  4076. GET_MQUANT();
  4077. }
  4078. s->current_picture.qscale_table[mb_pos] = mquant;
  4079. if (!v->ttmbf && cbp) {
  4080. ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
  4081. }
  4082. dst_idx = 0;
  4083. for (i = 0; i < 6; i++) {
  4084. s->dc_val[0][s->block_index[i]] = 0;
  4085. dst_idx += i >> 2;
  4086. val = ((cbp >> (5 - i)) & 1);
  4087. off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
  4088. if (val) {
  4089. vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
  4090. first_block, s->dest[dst_idx] + off,
  4091. (i & 4) ? s->uvlinesize : s->linesize,
  4092. (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
  4093. if (!v->ttmbf && ttmb < 8)
  4094. ttmb = -1;
  4095. first_block = 0;
  4096. }
  4097. }
  4098. }
  4099. }
/** Decode one B-frame MB (in interlaced frame B picture)
 *
 * Parses the macroblock layer for an interlaced-frame B picture and performs
 * motion compensation plus residual decoding. Handles intra, direct,
 * forward/backward/interpolated 1MV, 2MV-field and skipped macroblocks.
 * The bit-read order below mirrors the bitstream layout exactly; do not
 * reorder calls that consume bits.
 */
static int vc1_decode_b_mb_intfr(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */
    int mvsw = 0; /* motion vector switch */
    int mb_has_coeffs = 1; /* last_flag */
    int dmv_x, dmv_y; /* Differential MV components */
    int val; /* temp value */
    int first_block = 1;
    int dst_idx, off;
    int skipped, direct, twomv = 0;
    int block_cbp = 0, pat, block_tt = 0;
    int idx_mbmode = 0, mvbp;
    int stride_y, fieldtx;
    int bmvtype = BMV_TYPE_BACKWARD;
    int dir, dir2;

    mquant = v->pq; /* Lossy initialization */
    s->mb_intra = 0;

    /* SKIPMB flag: either coded raw per-MB or taken from the skip bitplane. */
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    if (!skipped) {
        /* MBMODE selects between 1MV and 2MV-field motion for this MB. */
        idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
        if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
            twomv = 1;
            v->blk_mv_type[s->block_index[0]] = 1;
            v->blk_mv_type[s->block_index[1]] = 1;
            v->blk_mv_type[s->block_index[2]] = 1;
            v->blk_mv_type[s->block_index[3]] = 1;
        } else {
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
        }
    }

    /* DIRECTMB flag: raw per-MB bit or value from the direct-MB bitplane. */
    if (v->dmb_is_raw)
        direct = get_bits1(gb);
    else
        direct = v->direct_mb_plane[mb_pos];

    if (direct) {
        if (s->next_picture_ptr->field_picture)
            av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
        /* Direct mode: derive forward (list 0) and backward (list 1) MVs by
         * scaling the co-located MV of the next anchor picture with BFRACTION. */
        s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
        s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
        s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
        s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
        if (twomv) {
            /* Second field MV pair lives in block 2; blocks 1/3 copy 0/2. */
            s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
            s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
            s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
            s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
            for (i = 1; i < 4; i += 2) {
                s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
                s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
                s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
                s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
            }
        } else {
            /* 1MV: replicate block 0's MVs to all four luma blocks. */
            for (i = 1; i < 4; i++) {
                s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
                s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
                s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
                s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
            }
        }
    }

    if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
        /* Intra MB: zero all MVs, decode AC/DC coefficients per block. */
        for (i = 0; i < 4; i++) {
            s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
            s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
            s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
            s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
        }
        s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
        s->mb_intra = v->is_intra[s->mb_x] = 1;
        for (i = 0; i < 6; i++)
            v->mb_type[0][s->block_index[i]] = 1;
        fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
        mb_has_coeffs = get_bits1(gb);
        if (mb_has_coeffs)
            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
        GET_MQUANT();
        s->current_picture.qscale_table[mb_pos] = mquant;
        /* Set DC scale - y and c use the same (not sure if necessary here) */
        s->y_dc_scale = s->y_dc_scale_table[mquant];
        s->c_dc_scale = s->c_dc_scale_table[mquant];
        dst_idx = 0;
        for (i = 0; i < 6; i++) {
            s->dc_val[0][s->block_index[i]] = 0;
            dst_idx += i >> 2;
            val = ((cbp >> (5 - i)) & 1);
            v->mb_type[0][s->block_index[i]] = s->mb_intra;
            v->a_avail = v->c_avail = 0;
            /* Above/left availability for DC/AC prediction. */
            if (i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if (i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];
            vc1_decode_intra_block(v, s->block[i], i, val, mquant,
                                   (i & 4) ? v->codingset2 : v->codingset);
            if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
                continue;
            v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
            if (i < 4) {
                /* FIELDTX doubles the luma stride and interleaves fields. */
                stride_y = s->linesize << fieldtx;
                off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
            } else {
                stride_y = s->uvlinesize;
                off = 0;
            }
            s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
        }
    } else {
        s->mb_intra = v->is_intra[s->mb_x] = 0;
        if (!direct) {
            /* BMVTYPE: 0/1 map to fwd/bwd depending on BFRACTION half. */
            if (skipped || !s->mb_intra) {
                bmvtype = decode012(gb);
                switch (bmvtype) {
                case 0:
                    bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
                    break;
                case 1:
                    bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                    break;
                case 2:
                    bmvtype = BMV_TYPE_INTERPOLATED;
                }
            }
            if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
                mvsw = get_bits1(gb);
        }

        if (!skipped) { // inter MB
            mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
            if (mb_has_coeffs)
                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            if (!direct) {
                /* MV block pattern: 4 bits for 2MV+interpolated, 2 bits otherwise. */
                if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
                    v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
                } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
                    v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
                }
            }
            for (i = 0; i < 6; i++)
                v->mb_type[0][s->block_index[i]] = 0;
            fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
            /* for all motion vector read MVDATA and motion compensate each block */
            dst_idx = 0;
            if (direct) {
                if (twomv) {
                    for (i = 0; i < 4; i++) {
                        vc1_mc_4mv_luma(v, i, 0, 0);
                        vc1_mc_4mv_luma(v, i, 1, 1);
                    }
                    vc1_mc_4mv_chroma4(v, 0, 0, 0);
                    vc1_mc_4mv_chroma4(v, 1, 1, 1);
                } else {
                    vc1_mc_1mv(v, 0);
                    vc1_interp_mc(v);
                }
            } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
                /* 2MV + interpolated: i alternates between the two fields
                 * (dir) and the two 8x16 halves (j = 0 or 2). */
                mvbp = v->fourmvbp;
                for (i = 0; i < 4; i++) {
                    dir = i==1 || i==3;
                    dmv_x = dmv_y = 0;
                    val = ((mvbp >> (3 - i)) & 1);
                    if (val)
                        get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                    j = i > 1 ? 2 : 0;
                    vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
                    vc1_mc_4mv_luma(v, j, dir, dir);
                    vc1_mc_4mv_luma(v, j+1, dir, dir);
                }
                vc1_mc_4mv_chroma4(v, 0, 0, 0);
                vc1_mc_4mv_chroma4(v, 1, 1, 1);
            } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
                /* 1MV interpolated: one forward MV, one backward MV. */
                mvbp = v->twomvbp;
                dmv_x = dmv_y = 0;
                if (mvbp & 2)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
                vc1_mc_1mv(v, 0);
                dmv_x = dmv_y = 0;
                if (mvbp & 1)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
                vc1_interp_mc(v);
            } else if (twomv) {
                /* 2MV field mode; MVSWITCH swaps prediction direction for
                 * the bottom field pair. */
                dir = bmvtype == BMV_TYPE_BACKWARD;
                dir2 = dir;
                if (mvsw)
                    dir2 = !dir;
                mvbp = v->twomvbp;
                dmv_x = dmv_y = 0;
                if (mvbp & 2)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
                dmv_x = dmv_y = 0;
                if (mvbp & 1)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
                if (mvsw) {
                    /* Cross-copy the field MVs between the two directions. */
                    for (i = 0; i < 2; i++) {
                        s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
                        s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
                        s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
                        s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
                    }
                } else {
                    /* Fill the unused direction by prediction only. */
                    vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
                    vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
                }
                vc1_mc_4mv_luma(v, 0, dir, 0);
                vc1_mc_4mv_luma(v, 1, dir, 0);
                vc1_mc_4mv_luma(v, 2, dir2, 0);
                vc1_mc_4mv_luma(v, 3, dir2, 0);
                vc1_mc_4mv_chroma4(v, dir, dir2, 0);
            } else {
                /* Plain 1MV forward or backward MB. */
                dir = bmvtype == BMV_TYPE_BACKWARD;
                mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
                dmv_x = dmv_y = 0;
                if (mvbp)
                    get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
                vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
                v->blk_mv_type[s->block_index[0]] = 1;
                v->blk_mv_type[s->block_index[1]] = 1;
                v->blk_mv_type[s->block_index[2]] = 1;
                v->blk_mv_type[s->block_index[3]] = 1;
                vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
                for (i = 0; i < 2; i++) {
                    s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
                    s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
                }
                vc1_mc_1mv(v, dir);
            }
            if (cbp)
                GET_MQUANT(); // p. 227
            s->current_picture.qscale_table[mb_pos] = mquant;
            if (!v->ttmbf && cbp)
                ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
            /* Residual decoding for all six blocks (4 luma + 2 chroma). */
            for (i = 0; i < 6; i++) {
                s->dc_val[0][s->block_index[i]] = 0;
                dst_idx += i >> 2;
                val = ((cbp >> (5 - i)) & 1);
                if (!fieldtx)
                    off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
                else
                    off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
                if (val) {
                    pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                             first_block, s->dest[dst_idx] + off,
                                             (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
                                             (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
                    block_cbp |= pat << (i << 2);
                    if (!v->ttmbf && ttmb < 8)
                        ttmb = -1;
                    first_block = 0;
                }
            }
        } else { // skipped
            dir = 0;
            for (i = 0; i < 6; i++) {
                v->mb_type[0][s->block_index[i]] = 0;
                s->dc_val[0][s->block_index[i]] = 0;
            }
            s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
            s->current_picture.qscale_table[mb_pos] = 0;
            v->blk_mv_type[s->block_index[0]] = 0;
            v->blk_mv_type[s->block_index[1]] = 0;
            v->blk_mv_type[s->block_index[2]] = 0;
            v->blk_mv_type[s->block_index[3]] = 0;
            if (!direct) {
                if (bmvtype == BMV_TYPE_INTERPOLATED) {
                    vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
                    vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
                } else {
                    dir = bmvtype == BMV_TYPE_BACKWARD;
                    vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
                    if (mvsw) {
                        /* NOTE(review): inner "if (mvsw)" is redundant here
                         * (already inside the mvsw branch) but harmless. */
                        int dir2 = dir;
                        if (mvsw)
                            dir2 = !dir;
                        for (i = 0; i < 2; i++) {
                            s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
                            s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
                            s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
                            s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
                        }
                    } else {
                        v->blk_mv_type[s->block_index[0]] = 1;
                        v->blk_mv_type[s->block_index[1]] = 1;
                        v->blk_mv_type[s->block_index[2]] = 1;
                        v->blk_mv_type[s->block_index[3]] = 1;
                        vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
                        for (i = 0; i < 2; i++) {
                            s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
                            s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
                        }
                    }
                }
            }
            vc1_mc_1mv(v, dir);
            if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
                vc1_interp_mc(v);
            }
        }
    }
    /* Shift the per-row intra flags into the "base" (previous-row) array at
     * the end of each MB row. */
    if (s->mb_x == s->mb_width - 1)
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
    v->cbp[s->mb_x] = block_cbp;
    v->ttblk[s->mb_x] = block_tt;
    return 0;
}
/** Decode blocks of I-frame
 *
 * Simple-/main-profile I-frame decoding loop: selects the AC coding sets,
 * then decodes every macroblock of the frame (all intra), applying the
 * overlap filter and optional loop filter as it goes.
 */
static void vc1_decode_i_blocks(VC1Context *v)
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    //do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < v->end_mb_x; s->mb_x++) {
            /* Destination pointers for the four 8x8 luma blocks and the
             * two chroma blocks of this MB. */
            uint8_t *dst[6];
            ff_update_block_index(s);
            dst[0] = s->dest[0];
            dst[1] = dst[0] + 8;
            dst[2] = s->dest[0] + s->linesize * 8;
            dst[3] = dst[2] + 8;
            dst[4] = s->dest[1];
            dst[5] = s->dest[2];
            s->dsp.clear_blocks(s->block[0]);
            /* NOTE(review): mb_pos uses mb_width here, not mb_stride as in
             * the other decode loops — confirm this is intentional. */
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.qscale_table[mb_pos] = v->pq;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            v->s.ac_pred = get_bits1(&v->s.gb);

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* Luma coded-block flags are predicted from neighbours. */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);

                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
                if (v->pq >= 9 && v->overlap) {
                    /* Overlap smoothing path: keep signed samples, clamp later. */
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] <<= 1;
                    s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                } else {
                    if (v->rangeredfrm)
                        for (j = 0; j < 64; j++)
                            s->block[k][j] = (s->block[k][j] - 64) << 1;
                    s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
                }
            }

            if (v->pq >= 9 && v->overlap) {
                /* Horizontal then vertical overlap filtering across MB edges. */
                if (s->mb_x) {
                    v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
                if (!s->first_slice_line) {
                    v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
            }
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);

            /* Bail out if the bitstream ran dry (corrupt input). */
            if (get_bits_count(&s->gb) > v->bits) {
                ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);

        s->first_slice_line = 0;
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);

    /* This is intentionally mb_height and not end_mb_y - unlike in advanced
     * profile, these only differ are when decoding MSS2 rectangles. */
    ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
}
/** Decode blocks of I-frame for advanced profile
 *
 * Advanced-profile I-frame decoding loop. Unlike the simple/main path it
 * supports slices (start_mb_y/end_mb_y), per-MB quantizer (GET_MQUANT),
 * raw AC-prediction/overlap/fieldtx flags, and defers writing pixels via
 * vc1_put_signed_blocks_clamped (blocks are output one row late so the
 * smoothing filter can see the row below).
 */
static void vc1_decode_i_blocks_adv(VC1Context *v)
{
    int k;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;
    int mquant = v->pq;
    int mqdiff;
    GetBitContext *gb = &s->gb;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    // do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    s->mb_y = s->start_mb_y;
    if (s->start_mb_y) {
        /* Slice start: clear the coded-block predictors of the row above so
         * prediction does not leak across the slice boundary. */
        s->mb_x = 0;
        init_block_index(v);
        memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
               (1 + s->b8_stride) * sizeof(*s->coded_block));
    }
    for (; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (;s->mb_x < s->mb_width; s->mb_x++) {
            int16_t (*block)[64] = v->block[v->cur_blk_idx];
            ff_update_block_index(s);
            s->dsp.clear_blocks(block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;

            // do actual MB decoding and displaying
            if (v->fieldtx_is_raw)
                v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            if ( v->acpred_is_raw)
                v->s.ac_pred = get_bits1(&v->s.gb);
            else
                v->s.ac_pred = v->acpred_plane[mb_pos];

            if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
                v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);

            GET_MQUANT();

            s->current_picture.qscale_table[mb_pos] = mquant;
            /* Set DC scale - y and c use the same */
            s->y_dc_scale = s->y_dc_scale_table[mquant];
            s->c_dc_scale = s->c_dc_scale_table[mquant];

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* Luma coded-block flags are predicted from neighbours. */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
                v->c_avail = !!s->mb_x || (k == 1 || k == 3);

                vc1_decode_i_block_adv(v, block[k], k, val,
                                       (k < 4) ? v->codingset : v->codingset2, mquant);

                if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
                    continue;
                v->vc1dsp.vc1_inv_trans_8x8(block[k]);
            }

            vc1_smooth_overlap_filter_iblk(v);
            vc1_put_signed_blocks_clamped(v);
            if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);

            /* Bail out if the bitstream ran dry (corrupt input). */
            if (get_bits_count(&s->gb) > v->bits) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
        s->first_slice_line = 0;
    }

    /* raw bottom MB row */
    /* Flush the last (delayed) row of blocks to the picture. */
    s->mb_x = 0;
    init_block_index(v);
    for (;s->mb_x < s->mb_width; s->mb_x++) {
        ff_update_block_index(s);
        vc1_put_signed_blocks_clamped(v);
        if (v->s.loop_filter)
            vc1_loop_filter_iblk_delayed(v, v->pq);
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
/**
 * Decode all macroblocks of a P frame (or one slice of it).
 * Dispatches per-MB decoding to the progressive, interlaced-field or
 * interlaced-frame variant according to v->fcm, and applies the in-loop
 * filter one MB behind the decode position (progressive only).
 */
static void vc1_decode_p_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int apply_loop_filter;

    /* select codingmode used for VLC tables selection */
    /* NOTE(review): both switches key off c_ac_table_index here, whereas
     * vc1_decode_i_blocks selects codingset from y_ac_table_index — confirm
     * against the spec whether this asymmetry is intentional for P frames. */
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* In-loop deblocking is only done here for progressive content and only
     * if the user did not ask to skip it. */
    apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
                        v->fcm == PROGRESSIVE;
    s->first_slice_line = 1;
    memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            if (v->fcm == ILACE_FIELD)
                vc1_decode_p_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_p_mb_intfr(v);
            else vc1_decode_p_mb(v);
            /* Loop filter runs one row behind so the bottom neighbour exists. */
            if (s->mb_y != s->start_mb_y && apply_loop_filter)
                vc1_apply_p_loop_filter(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
        }
        /* Shift this row's per-MB state into the "base" arrays so the next
         * row can reference it as its top neighbour. */
        memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
        memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
        memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
        memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
        if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (apply_loop_filter) {
        /* Filter the final (deferred) row. */
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);
            vc1_apply_p_loop_filter(v);
        }
    }
    if (s->end_mb_y >= s->start_mb_y)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
/**
 * Decode all macroblocks of a B frame (or one slice of it).
 * Dispatches per-MB decoding to the progressive, interlaced-field or
 * interlaced-frame variant according to v->fcm.
 */
static void vc1_decode_b_blocks(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    /* select codingmode used for VLC tables selection */
    /* NOTE(review): codingset keys off c_ac_table_index here (as in
     * vc1_decode_p_blocks), unlike the I-frame path — confirm intentional. */
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }
    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    s->first_slice_line = 1;
    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s);

            if (v->fcm == ILACE_FIELD)
                vc1_decode_b_mb_intfi(v);
            else if (v->fcm == ILACE_FRAME)
                vc1_decode_b_mb_intfr(v);
            else
                vc1_decode_b_mb(v);
            if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                // TODO: may need modification to handle slice coding
                ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
                       get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
                return;
            }
            if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
        }
        if (!v->s.loop_filter)
            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
        else if (s->mb_y)
            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
        s->first_slice_line = 0;
    }
    if (v->s.loop_filter)
        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
    ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                    (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
}
  4795. static void vc1_decode_skip_blocks(VC1Context *v)
  4796. {
  4797. MpegEncContext *s = &v->s;
  4798. if (!v->s.last_picture.f.data[0])
  4799. return;
  4800. ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
  4801. s->first_slice_line = 1;
  4802. for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
  4803. s->mb_x = 0;
  4804. init_block_index(v);
  4805. ff_update_block_index(s);
  4806. memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
  4807. memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  4808. memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
  4809. ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
  4810. s->first_slice_line = 0;
  4811. }
  4812. s->pict_type = AV_PICTURE_TYPE_P;
  4813. }
  4814. void ff_vc1_decode_blocks(VC1Context *v)
  4815. {
  4816. v->s.esc3_level_length = 0;
  4817. if (v->x8_type) {
  4818. ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
  4819. } else {
  4820. v->cur_blk_idx = 0;
  4821. v->left_blk_idx = -1;
  4822. v->topleft_blk_idx = 1;
  4823. v->top_blk_idx = 2;
  4824. switch (v->s.pict_type) {
  4825. case AV_PICTURE_TYPE_I:
  4826. if (v->profile == PROFILE_ADVANCED)
  4827. vc1_decode_i_blocks_adv(v);
  4828. else
  4829. vc1_decode_i_blocks(v);
  4830. break;
  4831. case AV_PICTURE_TYPE_P:
  4832. if (v->p_frame_skipped)
  4833. vc1_decode_skip_blocks(v);
  4834. else
  4835. vc1_decode_p_blocks(v);
  4836. break;
  4837. case AV_PICTURE_TYPE_B:
  4838. if (v->bi_type) {
  4839. if (v->profile == PROFILE_ADVANCED)
  4840. vc1_decode_i_blocks_adv(v);
  4841. else
  4842. vc1_decode_i_blocks(v);
  4843. } else
  4844. vc1_decode_b_blocks(v);
  4845. break;
  4846. }
  4847. }
  4848. }
  4849. #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
/** Per-picture sprite (WMV3Image/VC1Image) parameters parsed from the bitstream. */
typedef struct {
    /**
     * Transform coefficients for both sprites in 16.16 fixed point format,
     * in the order they appear in the bitstream:
     * x scale
     * rotation 1 (unused)
     * x offset
     * rotation 2 (unused)
     * y scale
     * y offset
     * alpha
     */
    int coefs[2][7];

    int effect_type, effect_flag;
    int effect_pcount1, effect_pcount2;   ///< amount of effect parameters stored in effect_params
    int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
} SpriteData;
  4867. static inline int get_fp_val(GetBitContext* gb)
  4868. {
  4869. return (get_bits_long(gb, 30) - (1 << 29)) << 1;
  4870. }
  4871. static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
  4872. {
  4873. c[1] = c[3] = 0;
  4874. switch (get_bits(gb, 2)) {
  4875. case 0:
  4876. c[0] = 1 << 16;
  4877. c[2] = get_fp_val(gb);
  4878. c[4] = 1 << 16;
  4879. break;
  4880. case 1:
  4881. c[0] = c[4] = get_fp_val(gb);
  4882. c[2] = get_fp_val(gb);
  4883. break;
  4884. case 2:
  4885. c[0] = get_fp_val(gb);
  4886. c[2] = get_fp_val(gb);
  4887. c[4] = get_fp_val(gb);
  4888. break;
  4889. case 3:
  4890. c[0] = get_fp_val(gb);
  4891. c[1] = get_fp_val(gb);
  4892. c[2] = get_fp_val(gb);
  4893. c[3] = get_fp_val(gb);
  4894. c[4] = get_fp_val(gb);
  4895. break;
  4896. }
  4897. c[5] = get_fp_val(gb);
  4898. if (get_bits1(gb))
  4899. c[6] = get_fp_val(gb);
  4900. else
  4901. c[6] = 1 << 16;
  4902. }
  4903. static int vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
  4904. {
  4905. AVCodecContext *avctx = v->s.avctx;
  4906. int sprite, i;
  4907. for (sprite = 0; sprite <= v->two_sprites; sprite++) {
  4908. vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
  4909. if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
  4910. avpriv_request_sample(avctx, "Non-zero rotation coefficients");
  4911. av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
  4912. for (i = 0; i < 7; i++)
  4913. av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
  4914. sd->coefs[sprite][i] / (1<<16),
  4915. (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
  4916. av_log(avctx, AV_LOG_DEBUG, "\n");
  4917. }
  4918. skip_bits(gb, 2);
  4919. if (sd->effect_type = get_bits_long(gb, 30)) {
  4920. switch (sd->effect_pcount1 = get_bits(gb, 4)) {
  4921. case 7:
  4922. vc1_sprite_parse_transform(gb, sd->effect_params1);
  4923. break;
  4924. case 14:
  4925. vc1_sprite_parse_transform(gb, sd->effect_params1);
  4926. vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
  4927. break;
  4928. default:
  4929. for (i = 0; i < sd->effect_pcount1; i++)
  4930. sd->effect_params1[i] = get_fp_val(gb);
  4931. }
  4932. if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
  4933. // effect 13 is simple alpha blending and matches the opacity above
  4934. av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
  4935. for (i = 0; i < sd->effect_pcount1; i++)
  4936. av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
  4937. sd->effect_params1[i] / (1 << 16),
  4938. (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
  4939. av_log(avctx, AV_LOG_DEBUG, "\n");
  4940. }
  4941. sd->effect_pcount2 = get_bits(gb, 16);
  4942. if (sd->effect_pcount2 > 10) {
  4943. av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
  4944. return AVERROR_INVALIDDATA;
  4945. } else if (sd->effect_pcount2) {
  4946. i = -1;
  4947. av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
  4948. while (++i < sd->effect_pcount2) {
  4949. sd->effect_params2[i] = get_fp_val(gb);
  4950. av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
  4951. sd->effect_params2[i] / (1 << 16),
  4952. (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
  4953. }
  4954. av_log(avctx, AV_LOG_DEBUG, "\n");
  4955. }
  4956. }
  4957. if (sd->effect_flag = get_bits1(gb))
  4958. av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
  4959. if (get_bits_count(gb) >= gb->size_in_bits +
  4960. (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0)) {
  4961. av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
  4962. return AVERROR_INVALIDDATA;
  4963. }
  4964. if (get_bits_count(gb) < gb->size_in_bits - 8)
  4965. av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
  4966. return 0;
  4967. }
/**
 * Composite one or two sprites into v->sprite_output_frame.
 *
 * Each output row is produced by horizontal resampling (sprite_h) into
 * cached row buffers followed by vertical blending (sprite_v_*). All
 * offsets/advances are 16.16 fixed point; the low 16 bits of the y
 * coordinate select the sub-pixel blend weight.
 */
static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
{
    int i, plane, row, sprite;
    /* which source line each of the two sr_rows buffers currently holds,
       per sprite; -1 = nothing cached yet */
    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
    uint8_t* src_h[2][2];
    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
    int ysub[2];
    MpegEncContext *s = &v->s;

    for (i = 0; i <= v->two_sprites; i++) {
        /* clamp offsets and advances so sampling never leaves the sprite */
        xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
        xadv[i] = sd->coefs[i][0];
        if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
            xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);

        yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
        yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
    }
    /* second sprite's opacity (16.16, clipped below 1.0) */
    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);

    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
        int width = v->output_width>>!!plane;

        for (row = 0; row < v->output_height>>!!plane; row++) {
            uint8_t *dst = v->sprite_output_frame->data[plane] +
                           v->sprite_output_frame->linesize[plane] * row;

            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
                /* sprite 0 comes from the current picture, sprite 1 from the last */
                uint8_t *iplane = s->current_picture.f.data[plane];
                int      iline  = s->current_picture.f.linesize[plane];
                int      ycoord = yoff[sprite] + yadv[sprite] * row;
                int      yline  = ycoord >> 16;
                int      next_line;
                ysub[sprite] = ycoord & 0xFFFF;
                if (sprite) {
                    iplane = s->last_picture.f.data[plane];
                    iline  = s->last_picture.f.linesize[plane];
                }
                next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
                if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
                    /* integer x offset and unit advance: read directly from the plane */
                    src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
                    if (ysub[sprite])
                        src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
                } else {
                    /* horizontal resampling needed: fill/reuse the cached rows */
                    if (sr_cache[sprite][0] != yline) {
                        if (sr_cache[sprite][1] == yline) {
                            /* previous iteration's second row becomes this row */
                            FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
                        } else {
                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
                            sr_cache[sprite][0] = yline;
                        }
                    }
                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
                                           iplane + next_line, xoff[sprite],
                                           xadv[sprite], width);
                        sr_cache[sprite][1] = yline + 1;
                    }
                    src_h[sprite][0] = v->sr_rows[sprite][0];
                    src_h[sprite][1] = v->sr_rows[sprite][1];
                }
            }

            /* vertical pass: pick the blend kernel matching which rows
               need sub-pixel interpolation */
            if (!v->two_sprites) {
                if (ysub[0]) {
                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
                } else {
                    memcpy(dst, src_h[0][0], width);
                }
            } else {
                if (ysub[0] && ysub[1]) {
                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], src_h[1][1], ysub[1], alpha, width);
                } else if (ysub[0]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
                                                       src_h[1][0], alpha, width);
                } else if (ysub[1]) {
                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
                                                       src_h[0][0], (1<<16)-1-alpha, width);
                } else {
                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
                }
            }
        }

        if (!plane) {
            /* chroma planes are half-sized: halve the offsets once */
            for (i = 0; i <= v->two_sprites; i++) {
                xoff[i] >>= 1;
                yoff[i] >>= 1;
            }
        }
    }
}
  5055. static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
  5056. {
  5057. int ret;
  5058. MpegEncContext *s = &v->s;
  5059. AVCodecContext *avctx = s->avctx;
  5060. SpriteData sd;
  5061. memset(&sd, 0, sizeof(sd));
  5062. ret = vc1_parse_sprites(v, gb, &sd);
  5063. if (ret < 0)
  5064. return ret;
  5065. if (!s->current_picture.f.data[0]) {
  5066. av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
  5067. return -1;
  5068. }
  5069. if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
  5070. av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
  5071. v->two_sprites = 0;
  5072. }
  5073. av_frame_unref(v->sprite_output_frame);
  5074. if ((ret = ff_get_buffer(avctx, v->sprite_output_frame, 0)) < 0)
  5075. return ret;
  5076. vc1_draw_sprites(v, &sd);
  5077. return 0;
  5078. }
  5079. static void vc1_sprite_flush(AVCodecContext *avctx)
  5080. {
  5081. VC1Context *v = avctx->priv_data;
  5082. MpegEncContext *s = &v->s;
  5083. AVFrame *f = &s->current_picture.f;
  5084. int plane, i;
  5085. /* Windows Media Image codecs have a convergence interval of two keyframes.
  5086. Since we can't enforce it, clear to black the missing sprite. This is
  5087. wrong but it looks better than doing nothing. */
  5088. if (f->data[0])
  5089. for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
  5090. for (i = 0; i < v->sprite_height>>!!plane; i++)
  5091. memset(f->data[plane] + i * f->linesize[plane],
  5092. plane ? 128 : 0, f->linesize[plane]);
  5093. }
  5094. #endif
  5095. av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
  5096. {
  5097. MpegEncContext *s = &v->s;
  5098. int i;
  5099. int mb_height = FFALIGN(s->mb_height, 2);
  5100. /* Allocate mb bitplanes */
  5101. v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
  5102. v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
  5103. v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
  5104. v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
  5105. v->acpred_plane = av_malloc (s->mb_stride * mb_height);
  5106. v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
  5107. v->n_allocated_blks = s->mb_width + 2;
  5108. v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
  5109. v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
  5110. v->cbp = v->cbp_base + s->mb_stride;
  5111. v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
  5112. v->ttblk = v->ttblk_base + s->mb_stride;
  5113. v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
  5114. v->is_intra = v->is_intra_base + s->mb_stride;
  5115. v->luma_mv_base = av_mallocz(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
  5116. v->luma_mv = v->luma_mv_base + s->mb_stride;
  5117. /* allocate block type info in that way so it could be used with s->block_index[] */
  5118. v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
  5119. v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
  5120. v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
  5121. v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
  5122. /* allocate memory to store block level MV info */
  5123. v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
  5124. v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
  5125. v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
  5126. v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
  5127. v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
  5128. v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
  5129. v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
  5130. v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
  5131. /* Init coded blocks info */
  5132. if (v->profile == PROFILE_ADVANCED) {
  5133. // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
  5134. // return -1;
  5135. // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
  5136. // return -1;
  5137. }
  5138. ff_intrax8_common_init(&v->x8,s);
  5139. if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
  5140. for (i = 0; i < 4; i++)
  5141. if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
  5142. return AVERROR(ENOMEM);
  5143. }
  5144. if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
  5145. !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
  5146. !v->mb_type_base) {
  5147. av_freep(&v->mv_type_mb_plane);
  5148. av_freep(&v->direct_mb_plane);
  5149. av_freep(&v->acpred_plane);
  5150. av_freep(&v->over_flags_plane);
  5151. av_freep(&v->block);
  5152. av_freep(&v->cbp_base);
  5153. av_freep(&v->ttblk_base);
  5154. av_freep(&v->is_intra_base);
  5155. av_freep(&v->luma_mv_base);
  5156. av_freep(&v->mb_type_base);
  5157. return AVERROR(ENOMEM);
  5158. }
  5159. return 0;
  5160. }
  5161. av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
  5162. {
  5163. int i;
  5164. for (i = 0; i < 64; i++) {
  5165. #define transpose(x) ((x >> 3) | ((x & 7) << 3))
  5166. v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
  5167. v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
  5168. v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
  5169. v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
  5170. v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
  5171. }
  5172. v->left_blk_sh = 0;
  5173. v->top_blk_sh = 3;
  5174. }
  5175. /** Initialize a VC1/WMV3 decoder
  5176. * @todo TODO: Handle VC-1 IDUs (Transport level?)
  5177. * @todo TODO: Decypher remaining bits in extra_data
  5178. */
  5179. static av_cold int vc1_decode_init(AVCodecContext *avctx)
  5180. {
  5181. VC1Context *v = avctx->priv_data;
  5182. MpegEncContext *s = &v->s;
  5183. GetBitContext gb;
  5184. int ret;
  5185. /* save the container output size for WMImage */
  5186. v->output_width = avctx->width;
  5187. v->output_height = avctx->height;
  5188. if (!avctx->extradata_size || !avctx->extradata)
  5189. return -1;
  5190. if (!(avctx->flags & CODEC_FLAG_GRAY))
  5191. avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
  5192. else
  5193. avctx->pix_fmt = AV_PIX_FMT_GRAY8;
  5194. avctx->hwaccel = ff_find_hwaccel(avctx);
  5195. v->s.avctx = avctx;
  5196. if ((ret = ff_vc1_init_common(v)) < 0)
  5197. return ret;
  5198. // ensure static VLC tables are initialized
  5199. if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
  5200. return ret;
  5201. if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
  5202. return ret;
  5203. // Hack to ensure the above functions will be called
  5204. // again once we know all necessary settings.
  5205. // That this is necessary might indicate a bug.
  5206. ff_vc1_decode_end(avctx);
  5207. ff_h264chroma_init(&v->h264chroma, 8);
  5208. ff_vc1dsp_init(&v->vc1dsp);
  5209. if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
  5210. int count = 0;
  5211. // looks like WMV3 has a sequence header stored in the extradata
  5212. // advanced sequence header may be before the first frame
  5213. // the last byte of the extradata is a version number, 1 for the
  5214. // samples we can decode
  5215. init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
  5216. if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
  5217. return ret;
  5218. count = avctx->extradata_size*8 - get_bits_count(&gb);
  5219. if (count > 0) {
  5220. av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
  5221. count, get_bits(&gb, count));
  5222. } else if (count < 0) {
  5223. av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
  5224. }
  5225. } else { // VC1/WVC1/WVP2
  5226. const uint8_t *start = avctx->extradata;
  5227. uint8_t *end = avctx->extradata + avctx->extradata_size;
  5228. const uint8_t *next;
  5229. int size, buf2_size;
  5230. uint8_t *buf2 = NULL;
  5231. int seq_initialized = 0, ep_initialized = 0;
  5232. if (avctx->extradata_size < 16) {
  5233. av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
  5234. return -1;
  5235. }
  5236. buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
  5237. start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
  5238. next = start;
  5239. for (; next < end; start = next) {
  5240. next = find_next_marker(start + 4, end);
  5241. size = next - start - 4;
  5242. if (size <= 0)
  5243. continue;
  5244. buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
  5245. init_get_bits(&gb, buf2, buf2_size * 8);
  5246. switch (AV_RB32(start)) {
  5247. case VC1_CODE_SEQHDR:
  5248. if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
  5249. av_free(buf2);
  5250. return ret;
  5251. }
  5252. seq_initialized = 1;
  5253. break;
  5254. case VC1_CODE_ENTRYPOINT:
  5255. if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
  5256. av_free(buf2);
  5257. return ret;
  5258. }
  5259. ep_initialized = 1;
  5260. break;
  5261. }
  5262. }
  5263. av_free(buf2);
  5264. if (!seq_initialized || !ep_initialized) {
  5265. av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
  5266. return -1;
  5267. }
  5268. v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
  5269. }
  5270. v->sprite_output_frame = av_frame_alloc();
  5271. if (!v->sprite_output_frame)
  5272. return AVERROR(ENOMEM);
  5273. avctx->profile = v->profile;
  5274. if (v->profile == PROFILE_ADVANCED)
  5275. avctx->level = v->level;
  5276. avctx->has_b_frames = !!avctx->max_b_frames;
  5277. s->mb_width = (avctx->coded_width + 15) >> 4;
  5278. s->mb_height = (avctx->coded_height + 15) >> 4;
  5279. if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
  5280. ff_vc1_init_transposed_scantables(v);
  5281. } else {
  5282. memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
  5283. v->left_blk_sh = 3;
  5284. v->top_blk_sh = 0;
  5285. }
  5286. if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
  5287. v->sprite_width = avctx->coded_width;
  5288. v->sprite_height = avctx->coded_height;
  5289. avctx->coded_width = avctx->width = v->output_width;
  5290. avctx->coded_height = avctx->height = v->output_height;
  5291. // prevent 16.16 overflows
  5292. if (v->sprite_width > 1 << 14 ||
  5293. v->sprite_height > 1 << 14 ||
  5294. v->output_width > 1 << 14 ||
  5295. v->output_height > 1 << 14) return -1;
  5296. if ((v->sprite_width&1) || (v->sprite_height&1)) {
  5297. avpriv_request_sample(avctx, "odd sprites support");
  5298. return AVERROR_PATCHWELCOME;
  5299. }
  5300. }
  5301. return 0;
  5302. }
  5303. /** Close a VC1/WMV3 decoder
  5304. * @warning Initial try at using MpegEncContext stuff
  5305. */
  5306. av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
  5307. {
  5308. VC1Context *v = avctx->priv_data;
  5309. int i;
  5310. av_frame_free(&v->sprite_output_frame);
  5311. for (i = 0; i < 4; i++)
  5312. av_freep(&v->sr_rows[i >> 1][i & 1]);
  5313. av_freep(&v->hrd_rate);
  5314. av_freep(&v->hrd_buffer);
  5315. ff_MPV_common_end(&v->s);
  5316. av_freep(&v->mv_type_mb_plane);
  5317. av_freep(&v->direct_mb_plane);
  5318. av_freep(&v->forward_mb_plane);
  5319. av_freep(&v->fieldtx_plane);
  5320. av_freep(&v->acpred_plane);
  5321. av_freep(&v->over_flags_plane);
  5322. av_freep(&v->mb_type_base);
  5323. av_freep(&v->blk_mv_type_base);
  5324. av_freep(&v->mv_f_base);
  5325. av_freep(&v->mv_f_next_base);
  5326. av_freep(&v->block);
  5327. av_freep(&v->cbp_base);
  5328. av_freep(&v->ttblk_base);
  5329. av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
  5330. av_freep(&v->luma_mv_base);
  5331. ff_intrax8_common_end(&v->x8);
  5332. return 0;
  5333. }
  5334. /** Decode a VC1/WMV3 frame
  5335. * @todo TODO: Handle VC-1 IDUs (Transport level?)
  5336. */
  5337. static int vc1_decode_frame(AVCodecContext *avctx, void *data,
  5338. int *got_frame, AVPacket *avpkt)
  5339. {
  5340. const uint8_t *buf = avpkt->data;
  5341. int buf_size = avpkt->size, n_slices = 0, i, ret;
  5342. VC1Context *v = avctx->priv_data;
  5343. MpegEncContext *s = &v->s;
  5344. AVFrame *pict = data;
  5345. uint8_t *buf2 = NULL;
  5346. const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
  5347. int mb_height, n_slices1=-1;
  5348. struct {
  5349. uint8_t *buf;
  5350. GetBitContext gb;
  5351. int mby_start;
  5352. } *slices = NULL, *tmp;
  5353. v->second_field = 0;
  5354. if(s->flags & CODEC_FLAG_LOW_DELAY)
  5355. s->low_delay = 1;
  5356. /* no supplementary picture */
  5357. if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
  5358. /* special case for last picture */
  5359. if (s->low_delay == 0 && s->next_picture_ptr) {
  5360. if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
  5361. return ret;
  5362. s->next_picture_ptr = NULL;
  5363. *got_frame = 1;
  5364. }
  5365. return buf_size;
  5366. }
  5367. if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
  5368. if (v->profile < PROFILE_ADVANCED)
  5369. avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
  5370. else
  5371. avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
  5372. }
  5373. //for advanced profile we may need to parse and unescape data
  5374. if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
  5375. int buf_size2 = 0;
  5376. buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
  5377. if (!buf2)
  5378. return AVERROR(ENOMEM);
  5379. if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
  5380. const uint8_t *start, *end, *next;
  5381. int size;
  5382. next = buf;
  5383. for (start = buf, end = buf + buf_size; next < end; start = next) {
  5384. next = find_next_marker(start + 4, end);
  5385. size = next - start - 4;
  5386. if (size <= 0) continue;
  5387. switch (AV_RB32(start)) {
  5388. case VC1_CODE_FRAME:
  5389. if (avctx->hwaccel ||
  5390. s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
  5391. buf_start = start;
  5392. buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
  5393. break;
  5394. case VC1_CODE_FIELD: {
  5395. int buf_size3;
  5396. if (avctx->hwaccel ||
  5397. s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
  5398. buf_start_second_field = start;
  5399. tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
  5400. if (!tmp)
  5401. goto err;
  5402. slices = tmp;
  5403. slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
  5404. if (!slices[n_slices].buf)
  5405. goto err;
  5406. buf_size3 = vc1_unescape_buffer(start + 4, size,
  5407. slices[n_slices].buf);
  5408. init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
  5409. buf_size3 << 3);
  5410. /* assuming that the field marker is at the exact middle,
  5411. hope it's correct */
  5412. slices[n_slices].mby_start = s->mb_height + 1 >> 1;
  5413. n_slices1 = n_slices - 1; // index of the last slice of the first field
  5414. n_slices++;
  5415. break;
  5416. }
  5417. case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
  5418. buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
  5419. init_get_bits(&s->gb, buf2, buf_size2 * 8);
  5420. ff_vc1_decode_entry_point(avctx, v, &s->gb);
  5421. break;
  5422. case VC1_CODE_SLICE: {
  5423. int buf_size3;
  5424. tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
  5425. if (!tmp)
  5426. goto err;
  5427. slices = tmp;
  5428. slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
  5429. if (!slices[n_slices].buf)
  5430. goto err;
  5431. buf_size3 = vc1_unescape_buffer(start + 4, size,
  5432. slices[n_slices].buf);
  5433. init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
  5434. buf_size3 << 3);
  5435. slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
  5436. n_slices++;
  5437. break;
  5438. }
  5439. }
  5440. }
  5441. } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
  5442. const uint8_t *divider;
  5443. int buf_size3;
  5444. divider = find_next_marker(buf, buf + buf_size);
  5445. if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
  5446. av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
  5447. goto err;
  5448. } else { // found field marker, unescape second field
  5449. if (avctx->hwaccel ||
  5450. s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
  5451. buf_start_second_field = divider;
  5452. tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
  5453. if (!tmp)
  5454. goto err;
  5455. slices = tmp;
  5456. slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
  5457. if (!slices[n_slices].buf)
  5458. goto err;
  5459. buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
  5460. init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
  5461. buf_size3 << 3);
  5462. slices[n_slices].mby_start = s->mb_height + 1 >> 1;
  5463. n_slices1 = n_slices - 1;
  5464. n_slices++;
  5465. }
  5466. buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
  5467. } else {
  5468. buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
  5469. }
  5470. init_get_bits(&s->gb, buf2, buf_size2*8);
  5471. } else
  5472. init_get_bits(&s->gb, buf, buf_size*8);
  5473. if (v->res_sprite) {
  5474. v->new_sprite = !get_bits1(&s->gb);
  5475. v->two_sprites = get_bits1(&s->gb);
  5476. /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
  5477. we're using the sprite compositor. These are intentionally kept separate
  5478. so you can get the raw sprites by using the wmv3 decoder for WMVP or
  5479. the vc1 one for WVP2 */
  5480. if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
  5481. if (v->new_sprite) {
  5482. // switch AVCodecContext parameters to those of the sprites
  5483. avctx->width = avctx->coded_width = v->sprite_width;
  5484. avctx->height = avctx->coded_height = v->sprite_height;
  5485. } else {
  5486. goto image;
  5487. }
  5488. }
  5489. }
  5490. if (s->context_initialized &&
  5491. (s->width != avctx->coded_width ||
  5492. s->height != avctx->coded_height)) {
  5493. ff_vc1_decode_end(avctx);
  5494. }
  5495. if (!s->context_initialized) {
  5496. if (ff_msmpeg4_decode_init(avctx) < 0)
  5497. goto err;
  5498. if (ff_vc1_decode_init_alloc_tables(v) < 0) {
  5499. ff_MPV_common_end(s);
  5500. goto err;
  5501. }
  5502. s->low_delay = !avctx->has_b_frames || v->res_sprite;
  5503. if (v->profile == PROFILE_ADVANCED) {
  5504. if(avctx->coded_width<=1 || avctx->coded_height<=1)
  5505. goto err;
  5506. s->h_edge_pos = avctx->coded_width;
  5507. s->v_edge_pos = avctx->coded_height;
  5508. }
  5509. }
  5510. // do parse frame header
  5511. v->pic_header_flag = 0;
  5512. v->first_pic_header_flag = 1;
  5513. if (v->profile < PROFILE_ADVANCED) {
  5514. if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
  5515. goto err;
  5516. }
  5517. } else {
  5518. if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
  5519. goto err;
  5520. }
  5521. }
  5522. v->first_pic_header_flag = 0;
  5523. if (avctx->debug & FF_DEBUG_PICT_INFO)
  5524. av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
  5525. if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
  5526. && s->pict_type != AV_PICTURE_TYPE_I) {
  5527. av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
  5528. goto err;
  5529. }
  5530. if ((s->mb_height >> v->field_mode) == 0) {
  5531. av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
  5532. goto err;
  5533. }
  5534. // for skipping the frame
  5535. s->current_picture.f.pict_type = s->pict_type;
  5536. s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
  5537. /* skip B-frames if we don't have reference frames */
  5538. if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
  5539. goto err;
  5540. }
  5541. if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
  5542. (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
  5543. avctx->skip_frame >= AVDISCARD_ALL) {
  5544. goto end;
  5545. }
  5546. if (s->next_p_frame_damaged) {
  5547. if (s->pict_type == AV_PICTURE_TYPE_B)
  5548. goto end;
  5549. else
  5550. s->next_p_frame_damaged = 0;
  5551. }
  5552. if (ff_MPV_frame_start(s, avctx) < 0) {
  5553. goto err;
  5554. }
  5555. v->s.current_picture_ptr->field_picture = v->field_mode;
  5556. v->s.current_picture_ptr->f.interlaced_frame = (v->fcm != PROGRESSIVE);
  5557. v->s.current_picture_ptr->f.top_field_first = v->tff;
  5558. // process pulldown flags
  5559. s->current_picture_ptr->f.repeat_pict = 0;
  5560. // Pulldown flags are only valid when 'broadcast' has been set.
  5561. // So ticks_per_frame will be 2
  5562. if (v->rff) {
  5563. // repeat field
  5564. s->current_picture_ptr->f.repeat_pict = 1;
  5565. } else if (v->rptfrm) {
  5566. // repeat frames
  5567. s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
  5568. }
  5569. s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
  5570. s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
  5571. if ((CONFIG_VC1_VDPAU_DECODER)
  5572. &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
  5573. if (v->field_mode && buf_start_second_field) {
  5574. ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
  5575. ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
  5576. } else {
  5577. ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
  5578. }
  5579. } else if (avctx->hwaccel) {
  5580. if (v->field_mode && buf_start_second_field) {
  5581. // decode first field
  5582. s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
  5583. if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
  5584. goto err;
  5585. if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
  5586. goto err;
  5587. if (avctx->hwaccel->end_frame(avctx) < 0)
  5588. goto err;
  5589. // decode second field
  5590. s->gb = slices[n_slices1 + 1].gb;
  5591. s->picture_structure = PICT_TOP_FIELD + v->tff;
  5592. v->second_field = 1;
  5593. v->pic_header_flag = 0;
  5594. if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
  5595. av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
  5596. goto err;
  5597. }
  5598. v->s.current_picture_ptr->f.pict_type = v->s.pict_type;
  5599. if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
  5600. goto err;
  5601. if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
  5602. goto err;
  5603. if (avctx->hwaccel->end_frame(avctx) < 0)
  5604. goto err;
  5605. } else {
  5606. s->picture_structure = PICT_FRAME;
  5607. if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
  5608. goto err;
  5609. if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
  5610. goto err;
  5611. if (avctx->hwaccel->end_frame(avctx) < 0)
  5612. goto err;
  5613. }
  5614. } else {
  5615. int header_ret = 0;
  5616. ff_mpeg_er_frame_start(s);
  5617. v->bits = buf_size * 8;
  5618. v->end_mb_x = s->mb_width;
  5619. if (v->field_mode) {
  5620. s->current_picture.f.linesize[0] <<= 1;
  5621. s->current_picture.f.linesize[1] <<= 1;
  5622. s->current_picture.f.linesize[2] <<= 1;
  5623. s->linesize <<= 1;
  5624. s->uvlinesize <<= 1;
  5625. }
  5626. mb_height = s->mb_height >> v->field_mode;
  5627. av_assert0 (mb_height > 0);
  5628. for (i = 0; i <= n_slices; i++) {
  5629. if (i > 0 && slices[i - 1].mby_start >= mb_height) {
  5630. if (v->field_mode <= 0) {
  5631. av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
  5632. "picture boundary (%d >= %d)\n", i,
  5633. slices[i - 1].mby_start, mb_height);
  5634. continue;
  5635. }
  5636. v->second_field = 1;
  5637. av_assert0((s->mb_height & 1) == 0);
  5638. v->blocks_off = s->b8_stride * (s->mb_height&~1);
  5639. v->mb_off = s->mb_stride * s->mb_height >> 1;
  5640. } else {
  5641. v->second_field = 0;
  5642. v->blocks_off = 0;
  5643. v->mb_off = 0;
  5644. }
  5645. if (i) {
  5646. v->pic_header_flag = 0;
  5647. if (v->field_mode && i == n_slices1 + 2) {
  5648. if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
  5649. av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
  5650. if (avctx->err_recognition & AV_EF_EXPLODE)
  5651. goto err;
  5652. continue;
  5653. }
  5654. } else if (get_bits1(&s->gb)) {
  5655. v->pic_header_flag = 1;
  5656. if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
  5657. av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
  5658. if (avctx->err_recognition & AV_EF_EXPLODE)
  5659. goto err;
  5660. continue;
  5661. }
  5662. }
  5663. }
  5664. if (header_ret < 0)
  5665. continue;
  5666. s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
  5667. if (!v->field_mode || v->second_field)
  5668. s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
  5669. else {
  5670. if (i >= n_slices) {
  5671. av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
  5672. continue;
  5673. }
  5674. s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
  5675. }
  5676. if (s->end_mb_y <= s->start_mb_y) {
  5677. av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
  5678. continue;
  5679. }
  5680. if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
  5681. av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
  5682. continue;
  5683. }
  5684. ff_vc1_decode_blocks(v);
  5685. if (i != n_slices)
  5686. s->gb = slices[i].gb;
  5687. }
  5688. if (v->field_mode) {
  5689. v->second_field = 0;
  5690. s->current_picture.f.linesize[0] >>= 1;
  5691. s->current_picture.f.linesize[1] >>= 1;
  5692. s->current_picture.f.linesize[2] >>= 1;
  5693. s->linesize >>= 1;
  5694. s->uvlinesize >>= 1;
  5695. if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
  5696. FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
  5697. FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
  5698. }
  5699. }
  5700. av_dlog(s->avctx, "Consumed %i/%i bits\n",
  5701. get_bits_count(&s->gb), s->gb.size_in_bits);
  5702. // if (get_bits_count(&s->gb) > buf_size * 8)
  5703. // return -1;
  5704. if(s->er.error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
  5705. goto err;
  5706. if (!v->field_mode)
  5707. ff_er_frame_end(&s->er);
  5708. }
  5709. ff_MPV_frame_end(s);
  5710. if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
  5711. image:
  5712. avctx->width = avctx->coded_width = v->output_width;
  5713. avctx->height = avctx->coded_height = v->output_height;
  5714. if (avctx->skip_frame >= AVDISCARD_NONREF)
  5715. goto end;
  5716. #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
  5717. if (vc1_decode_sprites(v, &s->gb))
  5718. goto err;
  5719. #endif
  5720. if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
  5721. goto err;
  5722. *got_frame = 1;
  5723. } else {
  5724. if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
  5725. if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
  5726. goto err;
  5727. ff_print_debug_info(s, s->current_picture_ptr, pict);
  5728. *got_frame = 1;
  5729. } else if (s->last_picture_ptr != NULL) {
  5730. if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
  5731. goto err;
  5732. ff_print_debug_info(s, s->last_picture_ptr, pict);
  5733. *got_frame = 1;
  5734. }
  5735. }
  5736. end:
  5737. av_free(buf2);
  5738. for (i = 0; i < n_slices; i++)
  5739. av_free(slices[i].buf);
  5740. av_free(slices);
  5741. return buf_size;
  5742. err:
  5743. av_free(buf2);
  5744. for (i = 0; i < n_slices; i++)
  5745. av_free(slices[i].buf);
  5746. av_free(slices);
  5747. return -1;
  5748. }
/* Human-readable profile names shared by all VC-1 family decoders below;
 * table is terminated by the FF_PROFILE_UNKNOWN sentinel entry. */
static const AVProfile profiles[] = {
    { FF_PROFILE_VC1_SIMPLE,   "Simple"   },
    { FF_PROFILE_VC1_MAIN,     "Main"     },
    { FF_PROFILE_VC1_COMPLEX,  "Complex"  },
    { FF_PROFILE_VC1_ADVANCED, "Advanced" },
    { FF_PROFILE_UNKNOWN },
};
/* Pixel formats advertised by the vc1 and wmv3 decoders: hardware formats
 * are listed first (each only when the corresponding hwaccel is compiled
 * in), with software YUV420P last before the AV_PIX_FMT_NONE terminator. */
static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
#if CONFIG_DXVA2
    AV_PIX_FMT_DXVA2_VLD,
#endif
#if CONFIG_VAAPI
    AV_PIX_FMT_VAAPI_VLD,
#endif
#if CONFIG_VDPAU
    AV_PIX_FMT_VDPAU,
#endif
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
/* Registration entry for the SMPTE VC-1 decoder (Advanced Profile
 * bitstreams); shares init/close/decode with the WMV3 variants below. */
AVCodec ff_vc1_decoder = {
    .name           = "vc1",
    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .flush          = ff_mpeg_flush,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#if CONFIG_WMV3_DECODER
/* Registration entry for the WMV3 (Windows Media Video 9) decoder;
 * identical callbacks to ff_vc1_decoder, only the codec ID differs. */
AVCodec ff_wmv3_decoder = {
    .name           = "wmv3",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .flush          = ff_mpeg_flush,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_WMV3_VDPAU_DECODER
/* VDPAU-accelerated WMV3 variant: advertises CODEC_CAP_HWACCEL_VDPAU and
 * the dedicated AV_PIX_FMT_VDPAU_WMV3 format; no flush callback is set. */
AVCodec ff_wmv3_vdpau_decoder = {
    .name           = "wmv3_vdpau",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_VC1_VDPAU_DECODER
/* VDPAU-accelerated VC-1 variant: same callbacks as ff_wmv3_vdpau_decoder
 * but with the VC-1 codec ID and AV_PIX_FMT_VDPAU_VC1 output format. */
AVCodec ff_vc1_vdpau_decoder = {
    .name           = "vc1_vdpau",
    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
    .profiles       = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
#if CONFIG_WMV3IMAGE_DECODER
/* WMV3 Image (sprite) decoder: uses vc1_sprite_flush and software-only
 * YUV420P output. No CODEC_CAP_DELAY and no profiles table, unlike the
 * video variants above. */
AVCodec ff_wmv3image_decoder = {
    .name           = "wmv3image",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV3IMAGE,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = vc1_sprite_flush,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_NONE
    },
};
#endif
#if CONFIG_VC1IMAGE_DECODER
/* VC-1 Image (sprite) decoder, v2 of the WMV3 Image format; mirrors
 * ff_wmv3image_decoder with its own codec ID. */
AVCodec ff_vc1image_decoder = {
    .name           = "vc1image",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VC1IMAGE,
    .priv_data_size = sizeof(VC1Context),
    .init           = vc1_decode_init,
    .close          = ff_vc1_decode_end,
    .decode         = vc1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = vc1_sprite_flush,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_NONE
    },
};
#endif