  1. /*
  2. * MPEG-4 decoder
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of Libav.
  7. *
  8. * Libav is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * Libav is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with Libav; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "error_resilience.h"
  23. #include "idctdsp.h"
  24. #include "internal.h"
  25. #include "mpegutils.h"
  26. #include "mpegvideo.h"
  27. #include "mpegvideodata.h"
  28. #include "mpeg4video.h"
  29. #include "h263.h"
  30. #include "profiles.h"
  31. #include "thread.h"
  32. #include "xvididct.h"
/* The defines below set the number of bits that are read at once for
 * reading VLC values. Changing these may improve speed and data cache needs;
 * be aware though that decreasing them may require the number of stages
 * passed to get_vlc* to be increased. */
  37. #define SPRITE_TRAJ_VLC_BITS 6
  38. #define DC_VLC_BITS 9
  39. #define MB_TYPE_B_VLC_BITS 4
  40. static VLC dc_lum, dc_chrom;
  41. static VLC sprite_trajectory;
  42. static VLC mb_type_b_vlc;
  43. static const int mb_type_b_map[4] = {
  44. MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
  45. MB_TYPE_L0L1 | MB_TYPE_16x16,
  46. MB_TYPE_L1 | MB_TYPE_16x16,
  47. MB_TYPE_L0 | MB_TYPE_16x16,
  48. };
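/* Index is the decoded B-VOP mb_type VLC: 0 = direct, 1 = bidirectional (both lists),
 * 2 = backward (list 1), 3 = forward (list 0). */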
  49. static inline int check_marker(AVCodecContext *avctx, GetBitContext *s, const char *msg)
  50. {
  51. int bit = get_bits1(s);
  52. if (!bit)
  53. av_log(avctx, AV_LOG_INFO, "Marker bit missing %s\n", msg);
  54. return bit;
  55. }
  56. /**
  57. * Predict the ac.
  58. * @param n block index (0-3 are luma, 4-5 are chroma)
  59. * @param dir the ac prediction direction
  60. */
  61. void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir)
  62. {
  63. int i;
  64. int16_t *ac_val, *ac_val1;
  65. int8_t *const qscale_table = s->current_picture.qscale_table;
  66. /* find prediction */
  67. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  68. ac_val1 = ac_val;
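/* Each block owns a 16-entry slot in ac_val: entries 1-7 hold its first column
 * (read for left prediction by the neighbour to its right) and entries 9-15 its
 * first row (read for top prediction by the neighbour below). */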
  69. if (s->ac_pred) {
  70. if (dir == 0) {
  71. const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
  72. /* left prediction */
  73. ac_val -= 16;
  74. if (s->mb_x == 0 || s->qscale == qscale_table[xy] ||
  75. n == 1 || n == 3) {
  76. /* same qscale */
  77. for (i = 1; i < 8; i++)
  78. block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
  79. } else {
  80. /* different qscale, we must rescale */
  81. for (i = 1; i < 8; i++)
  82. block[s->idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
  83. }
  84. } else {
  85. const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
  86. /* top prediction */
  87. ac_val -= 16 * s->block_wrap[n];
  88. if (s->mb_y == 0 || s->qscale == qscale_table[xy] ||
  89. n == 2 || n == 3) {
  90. /* same qscale */
  91. for (i = 1; i < 8; i++)
  92. block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
  93. } else {
  94. /* different qscale, we must rescale */
  95. for (i = 1; i < 8; i++)
  96. block[s->idsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
  97. }
  98. }
  99. }
  100. /* left copy */
  101. for (i = 1; i < 8; i++)
  102. ac_val1[i] = block[s->idsp.idct_permutation[i << 3]];
  103. /* top copy */
  104. for (i = 1; i < 8; i++)
  105. ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
  106. }
/**
 * Check whether the next bits are a resync marker or the end of the frame.
 * @return 0 if not
 */
  111. static inline int mpeg4_is_resync(MpegEncContext *s)
  112. {
  113. int bits_count = get_bits_count(&s->gb);
  114. int v = show_bits(&s->gb, 16);
  115. if (s->workaround_bugs & FF_BUG_NO_PADDING)
  116. return 0;
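/* Skip macroblock stuffing codes (8 + pict_type bits: all zeros ending in a one)
 * that may sit between the last coded MB and the resync marker. */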
  117. while (v <= 0xFF) {
  118. if (s->pict_type == AV_PICTURE_TYPE_B ||
  119. (v >> (8 - s->pict_type) != 1) || s->partitioned_frame)
  120. break;
  121. skip_bits(&s->gb, 8 + s->pict_type);
  122. bits_count += 8 + s->pict_type;
  123. v = show_bits(&s->gb, 16);
  124. }
  125. if (bits_count + 8 >= s->gb.size_in_bits) {
  126. v >>= 8;
  127. v |= 0x7F >> (7 - (bits_count & 7));
  128. if (v == 0x7F)
  129. return 1;
  130. } else {
  131. if (v == ff_mpeg4_resync_prefix[bits_count & 7]) {
  132. int len;
  133. GetBitContext gb = s->gb;
  134. skip_bits(&s->gb, 1);
  135. align_get_bits(&s->gb);
  136. for (len = 0; len < 32; len++)
  137. if (get_bits1(&s->gb))
  138. break;
  139. s->gb = gb;
  140. if (len >= ff_mpeg4_get_video_packet_prefix_length(s))
  141. return 1;
  142. }
  143. }
  144. return 0;
  145. }
  146. static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb)
  147. {
  148. MpegEncContext *s = &ctx->m;
  149. int a = 2 << s->sprite_warping_accuracy;
  150. int rho = 3 - s->sprite_warping_accuracy;
  151. int r = 16 / a;
  152. int alpha = 0;
  153. int beta = 0;
  154. int w = s->width;
  155. int h = s->height;
  156. int min_ab, i, w2, h2, w3, h3;
  157. int sprite_ref[4][2];
  158. int virtual_ref[2][2];
  159. // only true for rectangle shapes
  160. const int vop_ref[4][2] = { { 0, 0 }, { s->width, 0 },
  161. { 0, s->height }, { s->width, s->height } };
  162. int d[4][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };
  163. if (w <= 0 || h <= 0)
  164. return AVERROR_INVALIDDATA;
  165. for (i = 0; i < ctx->num_sprite_warping_points; i++) {
  166. int length;
  167. int x = 0, y = 0;
  168. length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
  169. if (length)
  170. x = get_xbits(gb, length);
  171. if (!(ctx->divx_version == 500 && ctx->divx_build == 413))
  172. skip_bits1(gb); /* marker bit */
  173. length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
  174. if (length)
  175. y = get_xbits(gb, length);
  176. skip_bits1(gb); /* marker bit */
  177. ctx->sprite_traj[i][0] = d[i][0] = x;
  178. ctx->sprite_traj[i][1] = d[i][1] = y;
  179. }
  180. for (; i < 4; i++)
  181. ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0;
  182. while ((1 << alpha) < w)
  183. alpha++;
  184. while ((1 << beta) < h)
  185. beta++; /* typo in the MPEG-4 std for the definition of w' and h' */
  186. w2 = 1 << alpha;
  187. h2 = 1 << beta;
  188. // Note, the 4th point isn't used for GMC
  189. if (ctx->divx_version == 500 && ctx->divx_build == 413) {
  190. sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0];
  191. sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1];
  192. sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0];
  193. sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1];
  194. sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0];
  195. sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1];
  196. } else {
  197. sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]);
  198. sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]);
  199. sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]);
  200. sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]);
  201. sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]);
  202. sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]);
  203. }
  204. /* sprite_ref[3][0] = (a >> 1) * (2 * vop_ref[3][0] + d[0][0] + d[1][0] + d[2][0] + d[3][0]);
  205. * sprite_ref[3][1] = (a >> 1) * (2 * vop_ref[3][1] + d[0][1] + d[1][1] + d[2][1] + d[3][1]); */
  206. /* This is mostly identical to the MPEG-4 std (and is totally unreadable
  207. * because of that...). Perhaps it should be reordered to be more readable.
  208. * The idea behind this virtual_ref mess is to be able to use shifts later
  209. * per pixel instead of divides so the distance between points is converted
  210. * from w&h based to w2&h2 based which are of the 2^x form. */
  211. virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) +
  212. ROUNDED_DIV(((w - w2) *
  213. (r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
  214. w2 * (r * sprite_ref[1][0] - 16 * vop_ref[1][0])), w);
  215. virtual_ref[0][1] = 16 * vop_ref[0][1] +
  216. ROUNDED_DIV(((w - w2) *
  217. (r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
  218. w2 * (r * sprite_ref[1][1] - 16 * vop_ref[1][1])), w);
  219. virtual_ref[1][0] = 16 * vop_ref[0][0] +
  220. ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
  221. h2 * (r * sprite_ref[2][0] - 16 * vop_ref[2][0])), h);
  222. virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) +
  223. ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
  224. h2 * (r * sprite_ref[2][1] - 16 * vop_ref[2][1])), h);
  225. switch (ctx->num_sprite_warping_points) {
  226. case 0:
  227. s->sprite_offset[0][0] =
  228. s->sprite_offset[0][1] =
  229. s->sprite_offset[1][0] =
  230. s->sprite_offset[1][1] = 0;
  231. s->sprite_delta[0][0] = a;
  232. s->sprite_delta[0][1] =
  233. s->sprite_delta[1][0] = 0;
  234. s->sprite_delta[1][1] = a;
  235. ctx->sprite_shift[0] =
  236. ctx->sprite_shift[1] = 0;
  237. break;
  238. case 1: // GMC only
  239. s->sprite_offset[0][0] = sprite_ref[0][0] - a * vop_ref[0][0];
  240. s->sprite_offset[0][1] = sprite_ref[0][1] - a * vop_ref[0][1];
  241. s->sprite_offset[1][0] = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) -
  242. a * (vop_ref[0][0] / 2);
  243. s->sprite_offset[1][1] = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) -
  244. a * (vop_ref[0][1] / 2);
  245. s->sprite_delta[0][0] = a;
  246. s->sprite_delta[0][1] =
  247. s->sprite_delta[1][0] = 0;
  248. s->sprite_delta[1][1] = a;
  249. ctx->sprite_shift[0] =
  250. ctx->sprite_shift[1] = 0;
  251. break;
  252. case 2:
  253. s->sprite_offset[0][0] = (sprite_ref[0][0] << (alpha + rho)) +
  254. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  255. (-vop_ref[0][0]) +
  256. (r * sprite_ref[0][1] - virtual_ref[0][1]) *
  257. (-vop_ref[0][1]) + (1 << (alpha + rho - 1));
  258. s->sprite_offset[0][1] = (sprite_ref[0][1] << (alpha + rho)) +
  259. (-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  260. (-vop_ref[0][0]) +
  261. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  262. (-vop_ref[0][1]) + (1 << (alpha + rho - 1));
  263. s->sprite_offset[1][0] = ((-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  264. (-2 * vop_ref[0][0] + 1) +
  265. (r * sprite_ref[0][1] - virtual_ref[0][1]) *
  266. (-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
  267. sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1)));
  268. s->sprite_offset[1][1] = ((-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  269. (-2 * vop_ref[0][0] + 1) +
  270. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  271. (-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
  272. sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1)));
  273. s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
  274. s->sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]);
  275. s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]);
  276. s->sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
  277. ctx->sprite_shift[0] = alpha + rho;
  278. ctx->sprite_shift[1] = alpha + rho + 2;
  279. break;
  280. case 3:
  281. min_ab = FFMIN(alpha, beta);
  282. w3 = w2 >> min_ab;
  283. h3 = h2 >> min_ab;
  284. s->sprite_offset[0][0] = (sprite_ref[0][0] << (alpha + beta + rho - min_ab)) +
  285. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  286. h3 * (-vop_ref[0][0]) +
  287. (-r * sprite_ref[0][0] + virtual_ref[1][0]) *
  288. w3 * (-vop_ref[0][1]) +
  289. (1 << (alpha + beta + rho - min_ab - 1));
  290. s->sprite_offset[0][1] = (sprite_ref[0][1] << (alpha + beta + rho - min_ab)) +
  291. (-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  292. h3 * (-vop_ref[0][0]) +
  293. (-r * sprite_ref[0][1] + virtual_ref[1][1]) *
  294. w3 * (-vop_ref[0][1]) +
  295. (1 << (alpha + beta + rho - min_ab - 1));
  296. s->sprite_offset[1][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  297. h3 * (-2 * vop_ref[0][0] + 1) +
  298. (-r * sprite_ref[0][0] + virtual_ref[1][0]) *
  299. w3 * (-2 * vop_ref[0][1] + 1) + 2 * w2 * h3 *
  300. r * sprite_ref[0][0] - 16 * w2 * h3 +
  301. (1 << (alpha + beta + rho - min_ab + 1));
  302. s->sprite_offset[1][1] = (-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  303. h3 * (-2 * vop_ref[0][0] + 1) +
  304. (-r * sprite_ref[0][1] + virtual_ref[1][1]) *
  305. w3 * (-2 * vop_ref[0][1] + 1) + 2 * w2 * h3 *
  306. r * sprite_ref[0][1] - 16 * w2 * h3 +
  307. (1 << (alpha + beta + rho - min_ab + 1));
  308. s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3;
  309. s->sprite_delta[0][1] = (-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3;
  310. s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3;
  311. s->sprite_delta[1][1] = (-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3;
  312. ctx->sprite_shift[0] = alpha + beta + rho - min_ab;
  313. ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2;
  314. break;
  315. }
  316. /* try to simplify the situation */
  317. if (s->sprite_delta[0][0] == a << ctx->sprite_shift[0] &&
  318. s->sprite_delta[0][1] == 0 &&
  319. s->sprite_delta[1][0] == 0 &&
  320. s->sprite_delta[1][1] == a << ctx->sprite_shift[0]) {
  321. s->sprite_offset[0][0] >>= ctx->sprite_shift[0];
  322. s->sprite_offset[0][1] >>= ctx->sprite_shift[0];
  323. s->sprite_offset[1][0] >>= ctx->sprite_shift[1];
  324. s->sprite_offset[1][1] >>= ctx->sprite_shift[1];
  325. s->sprite_delta[0][0] = a;
  326. s->sprite_delta[0][1] = 0;
  327. s->sprite_delta[1][0] = 0;
  328. s->sprite_delta[1][1] = a;
  329. ctx->sprite_shift[0] = 0;
  330. ctx->sprite_shift[1] = 0;
  331. s->real_sprite_warping_points = 1;
  332. } else {
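/* Scale offsets and deltas so that both the luma and the chroma path end up
 * with 16 fractional bits, presumably so the per-pixel GMC code can use a
 * fixed shift of 16. */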
  333. int shift_y = 16 - ctx->sprite_shift[0];
  334. int shift_c = 16 - ctx->sprite_shift[1];
  335. for (i = 0; i < 2; i++) {
  336. s->sprite_offset[0][i] <<= shift_y;
  337. s->sprite_offset[1][i] <<= shift_c;
  338. s->sprite_delta[0][i] <<= shift_y;
  339. s->sprite_delta[1][i] <<= shift_y;
  340. ctx->sprite_shift[i] = 16;
  341. }
  342. s->real_sprite_warping_points = ctx->num_sprite_warping_points;
  343. }
  344. return 0;
  345. }
  346. /**
  347. * Decode the next video packet.
  348. * @return <0 if something went wrong
  349. */
  350. int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
  351. {
  352. MpegEncContext *s = &ctx->m;
  353. int mb_num_bits = av_log2(s->mb_num - 1) + 1;
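/* just enough bits to code macroblock numbers 0 .. mb_num - 1 */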
  354. int header_extension = 0, mb_num, len;
  355. /* is there enough space left for a video packet + header */
  356. if (get_bits_count(&s->gb) > s->gb.size_in_bits - 20)
  357. return -1;
  358. for (len = 0; len < 32; len++)
  359. if (get_bits1(&s->gb))
  360. break;
  361. if (len != ff_mpeg4_get_video_packet_prefix_length(s)) {
  362. av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n");
  363. return -1;
  364. }
  365. if (ctx->shape != RECT_SHAPE) {
  366. header_extension = get_bits1(&s->gb);
  367. // FIXME more stuff here
  368. }
  369. mb_num = get_bits(&s->gb, mb_num_bits);
  370. if (mb_num >= s->mb_num) {
  371. av_log(s->avctx, AV_LOG_ERROR,
  372. "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);
  373. return -1;
  374. }
  375. if (s->pict_type == AV_PICTURE_TYPE_B) {
  376. int mb_x = 0, mb_y = 0;
  377. while (s->next_picture.mbskip_table[s->mb_index2xy[mb_num]]) {
  378. if (!mb_x)
  379. ff_thread_await_progress(&s->next_picture_ptr->tf, mb_y++, 0);
  380. mb_num++;
  381. if (++mb_x == s->mb_width)
  382. mb_x = 0;
  383. }
  384. if (mb_num >= s->mb_num)
  385. return -1; // slice contains just skipped MBs (already decoded)
  386. }
  387. s->mb_x = mb_num % s->mb_width;
  388. s->mb_y = mb_num / s->mb_width;
  389. if (ctx->shape != BIN_ONLY_SHAPE) {
  390. int qscale = get_bits(&s->gb, s->quant_precision);
  391. if (qscale)
  392. s->chroma_qscale = s->qscale = qscale;
  393. }
  394. if (ctx->shape == RECT_SHAPE)
  395. header_extension = get_bits1(&s->gb);
  396. if (header_extension) {
  397. int time_incr = 0;
  398. while (get_bits1(&s->gb) != 0)
  399. time_incr++;
check_marker(s->avctx, &s->gb, "before time_increment in video packet header");
skip_bits(&s->gb, ctx->time_increment_bits); /* time_increment */
check_marker(s->avctx, &s->gb, "before vop_coding_type in video packet header");
  403. skip_bits(&s->gb, 2); /* vop coding type */
  404. // FIXME not rect stuff here
  405. if (ctx->shape != BIN_ONLY_SHAPE) {
  406. skip_bits(&s->gb, 3); /* intra dc vlc threshold */
  407. // FIXME don't just ignore everything
  408. if (s->pict_type == AV_PICTURE_TYPE_S &&
  409. ctx->vol_sprite_usage == GMC_SPRITE) {
  410. if (mpeg4_decode_sprite_trajectory(ctx, &s->gb) < 0)
  411. return AVERROR_INVALIDDATA;
  412. av_log(s->avctx, AV_LOG_ERROR, "untested\n");
  413. }
  414. // FIXME reduced res stuff here
  415. if (s->pict_type != AV_PICTURE_TYPE_I) {
  416. int f_code = get_bits(&s->gb, 3); /* fcode_for */
  417. if (f_code == 0)
  418. av_log(s->avctx, AV_LOG_ERROR,
  419. "Error, video packet header damaged (f_code=0)\n");
  420. }
  421. if (s->pict_type == AV_PICTURE_TYPE_B) {
  422. int b_code = get_bits(&s->gb, 3);
  423. if (b_code == 0)
  424. av_log(s->avctx, AV_LOG_ERROR,
  425. "Error, video packet header damaged (b_code=0)\n");
  426. }
  427. }
  428. }
  429. // FIXME new-pred stuff
  430. return 0;
  431. }
  432. /**
  433. * Get the average motion vector for a GMC MB.
  434. * @param n either 0 for the x component or 1 for y
  435. * @return the average MV for a GMC MB
  436. */
  437. static inline int get_amv(Mpeg4DecContext *ctx, int n)
  438. {
  439. MpegEncContext *s = &ctx->m;
  440. int x, y, mb_v, sum, dx, dy, shift;
  441. int len = 1 << (s->f_code + 4);
  442. const int a = s->sprite_warping_accuracy;
  443. if (s->workaround_bugs & FF_BUG_AMV)
  444. len >>= s->quarter_sample;
  445. if (s->real_sprite_warping_points == 1) {
  446. if (ctx->divx_version == 500 && ctx->divx_build == 413)
  447. sum = s->sprite_offset[0][n] / (1 << (a - s->quarter_sample));
  448. else
  449. sum = RSHIFT(s->sprite_offset[0][n] << s->quarter_sample, a);
  450. } else {
  451. dx = s->sprite_delta[n][0];
  452. dy = s->sprite_delta[n][1];
  453. shift = ctx->sprite_shift[0];
  454. if (n)
  455. dy -= 1 << (shift + a + 1);
  456. else
  457. dx -= 1 << (shift + a + 1);
  458. mb_v = s->sprite_offset[0][n] + dx * s->mb_x * 16 + dy * s->mb_y * 16;
  459. sum = 0;
  460. for (y = 0; y < 16; y++) {
  461. int v;
  462. v = mb_v + dy * y;
  463. // FIXME optimize
  464. for (x = 0; x < 16; x++) {
  465. sum += v >> shift;
  466. v += dx;
  467. }
  468. }
  469. sum = RSHIFT(sum, a + 8 - s->quarter_sample);
  470. }
  471. if (sum < -len)
  472. sum = -len;
  473. else if (sum >= len)
  474. sum = len - 1;
  475. return sum;
  476. }
  477. /**
  478. * Decode the dc value.
  479. * @param n block index (0-3 are luma, 4-5 are chroma)
  480. * @param dir_ptr the prediction direction will be stored here
  481. * @return the quantized dc
  482. */
  483. static inline int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr)
  484. {
  485. int level, code;
  486. if (n < 4)
  487. code = get_vlc2(&s->gb, dc_lum.table, DC_VLC_BITS, 1);
  488. else
  489. code = get_vlc2(&s->gb, dc_chrom.table, DC_VLC_BITS, 1);
  490. if (code < 0 || code > 9 /* && s->nbit < 9 */) {
  491. av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
  492. return -1;
  493. }
  494. if (code == 0) {
  495. level = 0;
  496. } else {
  497. if (IS_3IV1) {
  498. if (code == 1)
  499. level = 2 * get_bits1(&s->gb) - 1;
  500. else {
  501. if (get_bits1(&s->gb))
  502. level = get_bits(&s->gb, code - 1) + (1 << (code - 1));
  503. else
  504. level = -get_bits(&s->gb, code - 1) - (1 << (code - 1));
  505. }
  506. } else {
  507. level = get_xbits(&s->gb, code);
  508. }
  509. if (code > 8) {
  510. if (get_bits1(&s->gb) == 0) { /* marker */
  511. if (s->avctx->err_recognition & AV_EF_BITSTREAM) {
  512. av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n");
  513. return -1;
  514. }
  515. }
  516. }
  517. }
  518. return ff_mpeg4_pred_dc(s, n, level, dir_ptr, 0);
  519. }
  520. /**
  521. * Decode first partition.
  522. * @return number of MBs decoded or <0 if an error occurred
  523. */
  524. static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
  525. {
  526. MpegEncContext *s = &ctx->m;
  527. int mb_num = 0;
  528. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
  529. /* decode first partition */
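/* In data-partitioned VOPs, partition A carries mcbpc, dquant and the intra DC
 * values for I-VOPs, or mcbpc and the motion vectors for P/S-VOPs; it is
 * terminated by DC_MARKER resp. MOTION_MARKER. */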
  530. s->first_slice_line = 1;
  531. for (; s->mb_y < s->mb_height; s->mb_y++) {
  532. ff_init_block_index(s);
  533. for (; s->mb_x < s->mb_width; s->mb_x++) {
  534. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  535. int cbpc;
  536. int dir = 0;
  537. mb_num++;
  538. ff_update_block_index(s);
  539. if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
  540. s->first_slice_line = 0;
  541. if (s->pict_type == AV_PICTURE_TYPE_I) {
  542. int i;
  543. do {
  544. if (show_bits_long(&s->gb, 19) == DC_MARKER)
  545. return mb_num - 1;
  546. cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
  547. if (cbpc < 0) {
  548. av_log(s->avctx, AV_LOG_ERROR,
  549. "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
  550. return -1;
  551. }
  552. } while (cbpc == 8);
  553. s->cbp_table[xy] = cbpc & 3;
  554. s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
  555. s->mb_intra = 1;
  556. if (cbpc & 4)
  557. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  558. s->current_picture.qscale_table[xy] = s->qscale;
  559. s->mbintra_table[xy] = 1;
  560. for (i = 0; i < 6; i++) {
  561. int dc_pred_dir;
  562. int dc = mpeg4_decode_dc(s, i, &dc_pred_dir);
  563. if (dc < 0) {
  564. av_log(s->avctx, AV_LOG_ERROR,
  565. "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
  566. return -1;
  567. }
  568. dir <<= 1;
  569. if (dc_pred_dir)
  570. dir |= 1;
  571. }
  572. s->pred_dir_table[xy] = dir;
  573. } else { /* P/S_TYPE */
  574. int mx, my, pred_x, pred_y, bits;
  575. int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]];
  576. const int stride = s->b8_stride * 2;
  577. try_again:
  578. bits = show_bits(&s->gb, 17);
  579. if (bits == MOTION_MARKER)
  580. return mb_num - 1;
  581. skip_bits1(&s->gb);
  582. if (bits & 0x10000) {
  583. /* skip mb */
  584. if (s->pict_type == AV_PICTURE_TYPE_S &&
  585. ctx->vol_sprite_usage == GMC_SPRITE) {
  586. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  587. MB_TYPE_16x16 |
  588. MB_TYPE_GMC |
  589. MB_TYPE_L0;
  590. mx = get_amv(ctx, 0);
  591. my = get_amv(ctx, 1);
  592. } else {
  593. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  594. MB_TYPE_16x16 |
  595. MB_TYPE_L0;
  596. mx = my = 0;
  597. }
  598. mot_val[0] =
  599. mot_val[2] =
  600. mot_val[0 + stride] =
  601. mot_val[2 + stride] = mx;
  602. mot_val[1] =
  603. mot_val[3] =
  604. mot_val[1 + stride] =
  605. mot_val[3 + stride] = my;
  606. if (s->mbintra_table[xy])
  607. ff_clean_intra_table_entries(s);
  608. continue;
  609. }
  610. cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
  611. if (cbpc < 0) {
  612. av_log(s->avctx, AV_LOG_ERROR,
  613. "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
  614. return -1;
  615. }
  616. if (cbpc == 20)
  617. goto try_again;
  618. s->cbp_table[xy] = cbpc & (8 + 3); // 8 is dquant
  619. s->mb_intra = ((cbpc & 4) != 0);
  620. if (s->mb_intra) {
  621. s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
  622. s->mbintra_table[xy] = 1;
  623. mot_val[0] =
  624. mot_val[2] =
  625. mot_val[0 + stride] =
  626. mot_val[2 + stride] = 0;
  627. mot_val[1] =
  628. mot_val[3] =
  629. mot_val[1 + stride] =
  630. mot_val[3 + stride] = 0;
  631. } else {
  632. if (s->mbintra_table[xy])
  633. ff_clean_intra_table_entries(s);
  634. if (s->pict_type == AV_PICTURE_TYPE_S &&
  635. ctx->vol_sprite_usage == GMC_SPRITE &&
  636. (cbpc & 16) == 0)
  637. s->mcsel = get_bits1(&s->gb);
  638. else
  639. s->mcsel = 0;
  640. if ((cbpc & 16) == 0) {
  641. /* 16x16 motion prediction */
  642. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  643. if (!s->mcsel) {
  644. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  645. if (mx >= 0xffff)
  646. return -1;
  647. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  648. if (my >= 0xffff)
  649. return -1;
  650. s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
  651. MB_TYPE_L0;
  652. } else {
  653. mx = get_amv(ctx, 0);
  654. my = get_amv(ctx, 1);
  655. s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
  656. MB_TYPE_GMC |
  657. MB_TYPE_L0;
  658. }
  659. mot_val[0] =
  660. mot_val[2] =
  661. mot_val[0 + stride] =
  662. mot_val[2 + stride] = mx;
  663. mot_val[1] =
  664. mot_val[3] =
  665. mot_val[1 + stride] =
  666. mot_val[3 + stride] = my;
  667. } else {
  668. int i;
  669. s->current_picture.mb_type[xy] = MB_TYPE_8x8 |
  670. MB_TYPE_L0;
  671. for (i = 0; i < 4; i++) {
  672. int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  673. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  674. if (mx >= 0xffff)
  675. return -1;
  676. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  677. if (my >= 0xffff)
  678. return -1;
  679. mot_val[0] = mx;
  680. mot_val[1] = my;
  681. }
  682. }
  683. }
  684. }
  685. }
  686. s->mb_x = 0;
  687. }
  688. return mb_num;
  689. }
  690. /**
  691. * decode second partition.
  692. * @return <0 if an error occurred
  693. */
  694. static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
  695. {
  696. int mb_num = 0;
  697. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
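/* Partition B carries the texture header: cbpy (and ac_pred for intra MBs) for
 * each non-skipped MB, plus dquant and the intra DC values of intra MBs in
 * P/S-VOPs. The DCT coefficients themselves follow and are decoded per MB by
 * mpeg4_decode_partitioned_mb(). */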
  698. s->mb_x = s->resync_mb_x;
  699. s->first_slice_line = 1;
  700. for (s->mb_y = s->resync_mb_y; mb_num < mb_count; s->mb_y++) {
  701. ff_init_block_index(s);
  702. for (; mb_num < mb_count && s->mb_x < s->mb_width; s->mb_x++) {
  703. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  704. mb_num++;
  705. ff_update_block_index(s);
  706. if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
  707. s->first_slice_line = 0;
  708. if (s->pict_type == AV_PICTURE_TYPE_I) {
  709. int ac_pred = get_bits1(&s->gb);
  710. int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  711. if (cbpy < 0) {
  712. av_log(s->avctx, AV_LOG_ERROR,
  713. "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
  714. return -1;
  715. }
  716. s->cbp_table[xy] |= cbpy << 2;
  717. s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
  718. } else { /* P || S_TYPE */
  719. if (IS_INTRA(s->current_picture.mb_type[xy])) {
  720. int i;
  721. int dir = 0;
  722. int ac_pred = get_bits1(&s->gb);
  723. int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  724. if (cbpy < 0) {
  725. av_log(s->avctx, AV_LOG_ERROR,
  726. "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
  727. return -1;
  728. }
  729. if (s->cbp_table[xy] & 8)
  730. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  731. s->current_picture.qscale_table[xy] = s->qscale;
  732. for (i = 0; i < 6; i++) {
  733. int dc_pred_dir;
  734. int dc = mpeg4_decode_dc(s, i, &dc_pred_dir);
  735. if (dc < 0) {
  736. av_log(s->avctx, AV_LOG_ERROR,
  737. "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
  738. return -1;
  739. }
  740. dir <<= 1;
  741. if (dc_pred_dir)
  742. dir |= 1;
  743. }
  744. s->cbp_table[xy] &= 3; // remove dquant
  745. s->cbp_table[xy] |= cbpy << 2;
  746. s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
  747. s->pred_dir_table[xy] = dir;
  748. } else if (IS_SKIP(s->current_picture.mb_type[xy])) {
  749. s->current_picture.qscale_table[xy] = s->qscale;
  750. s->cbp_table[xy] = 0;
  751. } else {
  752. int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  753. if (cbpy < 0) {
  754. av_log(s->avctx, AV_LOG_ERROR,
  755. "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
  756. return -1;
  757. }
  758. if (s->cbp_table[xy] & 8)
  759. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  760. s->current_picture.qscale_table[xy] = s->qscale;
  761. s->cbp_table[xy] &= 3; // remove dquant
  762. s->cbp_table[xy] |= (cbpy ^ 0xf) << 2;
  763. }
  764. }
  765. }
  766. if (mb_num >= mb_count)
  767. return 0;
  768. s->mb_x = 0;
  769. }
  770. return 0;
  771. }
  772. /**
  773. * Decode the first and second partition.
  774. * @return <0 if error (and sets error type in the error_status_table)
  775. */
  776. int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
  777. {
  778. MpegEncContext *s = &ctx->m;
  779. int mb_num;
  780. const int part_a_error = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR;
  781. const int part_a_end = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_END | ER_MV_END) : ER_MV_END;
  782. mb_num = mpeg4_decode_partition_a(ctx);
  783. if (mb_num < 0) {
  784. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  785. s->mb_x, s->mb_y, part_a_error);
  786. return -1;
  787. }
  788. if (s->resync_mb_x + s->resync_mb_y * s->mb_width + mb_num > s->mb_num) {
  789. av_log(s->avctx, AV_LOG_ERROR, "slice below monitor ...\n");
  790. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  791. s->mb_x, s->mb_y, part_a_error);
  792. return -1;
  793. }
  794. s->mb_num_left = mb_num;
  795. if (s->pict_type == AV_PICTURE_TYPE_I) {
  796. while (show_bits(&s->gb, 9) == 1)
  797. skip_bits(&s->gb, 9);
  798. if (get_bits_long(&s->gb, 19) != DC_MARKER) {
  799. av_log(s->avctx, AV_LOG_ERROR,
  800. "marker missing after first I partition at %d %d\n",
  801. s->mb_x, s->mb_y);
  802. return -1;
  803. }
  804. } else {
  805. while (show_bits(&s->gb, 10) == 1)
  806. skip_bits(&s->gb, 10);
  807. if (get_bits(&s->gb, 17) != MOTION_MARKER) {
  808. av_log(s->avctx, AV_LOG_ERROR,
  809. "marker missing after first P partition at %d %d\n",
  810. s->mb_x, s->mb_y);
  811. return -1;
  812. }
  813. }
  814. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  815. s->mb_x - 1, s->mb_y, part_a_end);
  816. if (mpeg4_decode_partition_b(s, mb_num) < 0) {
  817. if (s->pict_type == AV_PICTURE_TYPE_P)
  818. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  819. s->mb_x, s->mb_y, ER_DC_ERROR);
  820. return -1;
  821. } else {
  822. if (s->pict_type == AV_PICTURE_TYPE_P)
  823. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  824. s->mb_x - 1, s->mb_y, ER_DC_END);
  825. }
  826. return 0;
  827. }
  828. /**
  829. * Decode a block.
  830. * @return <0 if an error occurred
  831. */
  832. static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
  833. int n, int coded, int intra, int rvlc)
  834. {
  835. MpegEncContext *s = &ctx->m;
  836. int level, i, last, run, qmul, qadd, dc_pred_dir;
  837. RLTable *rl;
  838. RL_VLC_ELEM *rl_vlc;
  839. const uint8_t *scan_table;
  840. // Note intra & rvlc should be optimized away if this is inlined
  841. if (intra) {
  842. if (ctx->use_intra_dc_vlc) {
  843. /* DC coef */
  844. if (s->partitioned_frame) {
  845. level = s->dc_val[0][s->block_index[n]];
  846. if (n < 4)
  847. level = FASTDIV((level + (s->y_dc_scale >> 1)), s->y_dc_scale);
  848. else
  849. level = FASTDIV((level + (s->c_dc_scale >> 1)), s->c_dc_scale);
  850. dc_pred_dir = (s->pred_dir_table[s->mb_x + s->mb_y * s->mb_stride] << n) & 32;
  851. } else {
  852. level = mpeg4_decode_dc(s, n, &dc_pred_dir);
  853. if (level < 0)
  854. return -1;
  855. }
  856. block[0] = level;
  857. i = 0;
  858. } else {
  859. i = -1;
  860. ff_mpeg4_pred_dc(s, n, 0, &dc_pred_dir, 0);
  861. }
  862. if (!coded)
  863. goto not_coded;
  864. if (rvlc) {
  865. rl = &ff_rvlc_rl_intra;
  866. rl_vlc = ff_rvlc_rl_intra.rl_vlc[0];
  867. } else {
  868. rl = &ff_mpeg4_rl_intra;
  869. rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0];
  870. }
  871. if (s->ac_pred) {
  872. if (dc_pred_dir == 0)
  873. scan_table = s->intra_v_scantable.permutated; /* left */
  874. else
  875. scan_table = s->intra_h_scantable.permutated; /* top */
  876. } else {
  877. scan_table = s->intra_scantable.permutated;
  878. }
  879. qmul = 1;
  880. qadd = 0;
  881. } else {
  882. i = -1;
  883. if (!coded) {
  884. s->block_last_index[n] = i;
  885. return 0;
  886. }
  887. if (rvlc)
  888. rl = &ff_rvlc_rl_inter;
  889. else
  890. rl = &ff_h263_rl_inter;
  891. scan_table = s->intra_scantable.permutated;
  892. if (s->mpeg_quant) {
  893. qmul = 1;
  894. qadd = 0;
  895. if (rvlc)
  896. rl_vlc = ff_rvlc_rl_inter.rl_vlc[0];
  897. else
  898. rl_vlc = ff_h263_rl_inter.rl_vlc[0];
  899. } else {
  900. qmul = s->qscale << 1;
  901. qadd = (s->qscale - 1) | 1;
  902. if (rvlc)
  903. rl_vlc = ff_rvlc_rl_inter.rl_vlc[s->qscale];
  904. else
  905. rl_vlc = ff_h263_rl_inter.rl_vlc[s->qscale];
  906. }
  907. }
  908. {
  909. OPEN_READER(re, &s->gb);
  910. for (;;) {
  911. UPDATE_CACHE(re, &s->gb);
  912. GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0);
  913. if (level == 0) {
  914. /* escape */
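/* MPEG-4 has three escape modes for the AC VLC: type 1 adds max_level to the
 * level of the following VLC event, type 2 adds max_run + 1 to its run, and
 * type 3 codes last/run/level as fixed-length fields. RVLC streams only have
 * the FLC-style escape, protected by extra marker bits. */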
  915. if (rvlc) {
  916. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  917. av_log(s->avctx, AV_LOG_ERROR,
  918. "1. marker bit missing in rvlc esc\n");
  919. return -1;
  920. }
  921. SKIP_CACHE(re, &s->gb, 1);
  922. last = SHOW_UBITS(re, &s->gb, 1);
  923. SKIP_CACHE(re, &s->gb, 1);
  924. run = SHOW_UBITS(re, &s->gb, 6);
  925. SKIP_COUNTER(re, &s->gb, 1 + 1 + 6);
  926. UPDATE_CACHE(re, &s->gb);
  927. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  928. av_log(s->avctx, AV_LOG_ERROR,
  929. "2. marker bit missing in rvlc esc\n");
  930. return -1;
  931. }
  932. SKIP_CACHE(re, &s->gb, 1);
  933. level = SHOW_UBITS(re, &s->gb, 11);
  934. SKIP_CACHE(re, &s->gb, 11);
  935. if (SHOW_UBITS(re, &s->gb, 5) != 0x10) {
  936. av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n");
  937. return -1;
  938. }
  939. SKIP_CACHE(re, &s->gb, 5);
  940. level = level * qmul + qadd;
  941. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  942. SKIP_COUNTER(re, &s->gb, 1 + 11 + 5 + 1);
  943. i += run + 1;
  944. if (last)
  945. i += 192;
  946. } else {
  947. int cache;
  948. cache = GET_CACHE(re, &s->gb);
  949. if (IS_3IV1)
  950. cache ^= 0xC0000000;
  951. if (cache & 0x80000000) {
  952. if (cache & 0x40000000) {
  953. /* third escape */
  954. SKIP_CACHE(re, &s->gb, 2);
  955. last = SHOW_UBITS(re, &s->gb, 1);
  956. SKIP_CACHE(re, &s->gb, 1);
  957. run = SHOW_UBITS(re, &s->gb, 6);
  958. SKIP_COUNTER(re, &s->gb, 2 + 1 + 6);
  959. UPDATE_CACHE(re, &s->gb);
  960. if (IS_3IV1) {
  961. level = SHOW_SBITS(re, &s->gb, 12);
  962. LAST_SKIP_BITS(re, &s->gb, 12);
  963. } else {
  964. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  965. av_log(s->avctx, AV_LOG_ERROR,
  966. "1. marker bit missing in 3. esc\n");
  967. return -1;
  968. }
  969. SKIP_CACHE(re, &s->gb, 1);
  970. level = SHOW_SBITS(re, &s->gb, 12);
  971. SKIP_CACHE(re, &s->gb, 12);
  972. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  973. av_log(s->avctx, AV_LOG_ERROR,
  974. "2. marker bit missing in 3. esc\n");
  975. return -1;
  976. }
  977. SKIP_COUNTER(re, &s->gb, 1 + 12 + 1);
  978. }
  979. if (level > 0)
  980. level = level * qmul + qadd;
  981. else
  982. level = level * qmul - qadd;
  983. if ((unsigned)(level + 2048) > 4095) {
  984. if (s->avctx->err_recognition & AV_EF_BITSTREAM) {
  985. if (level > 2560 || level < -2560) {
  986. av_log(s->avctx, AV_LOG_ERROR,
  987. "|level| overflow in 3. esc, qp=%d\n",
  988. s->qscale);
  989. return -1;
  990. }
  991. }
  992. level = level < 0 ? -2048 : 2047;
  993. }
  994. i += run + 1;
  995. if (last)
  996. i += 192;
  997. } else {
  998. /* second escape */
  999. SKIP_BITS(re, &s->gb, 2);
  1000. GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
  1001. i += run + rl->max_run[run >> 7][level / qmul] + 1; // FIXME opt indexing
  1002. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1003. LAST_SKIP_BITS(re, &s->gb, 1);
  1004. }
  1005. } else {
  1006. /* first escape */
  1007. SKIP_BITS(re, &s->gb, 1);
  1008. GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
  1009. i += run;
  1010. level = level + rl->max_level[run >> 7][(run - 1) & 63] * qmul; // FIXME opt indexing
  1011. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1012. LAST_SKIP_BITS(re, &s->gb, 1);
  1013. }
  1014. }
  1015. } else {
  1016. i += run;
  1017. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1018. LAST_SKIP_BITS(re, &s->gb, 1);
  1019. }
  1020. if (i > 62) {
  1021. i -= 192;
  1022. if (i & (~63)) {
  1023. av_log(s->avctx, AV_LOG_ERROR,
  1024. "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
  1025. return -1;
  1026. }
  1027. block[scan_table[i]] = level;
  1028. break;
  1029. }
  1030. block[scan_table[i]] = level;
  1031. }
  1032. CLOSE_READER(re, &s->gb);
  1033. }
  1034. not_coded:
  1035. if (intra) {
  1036. if (!ctx->use_intra_dc_vlc) {
  1037. block[0] = ff_mpeg4_pred_dc(s, n, block[0], &dc_pred_dir, 0);
  1038. i -= i >> 31; // if (i == -1) i = 0;
  1039. }
  1040. ff_mpeg4_pred_ac(s, block, n, dc_pred_dir);
  1041. if (s->ac_pred)
  1042. i = 63; // FIXME not optimal
  1043. }
  1044. s->block_last_index[n] = i;
  1045. return 0;
  1046. }
  1047. /**
  1048. * decode partition C of one MB.
  1049. * @return <0 if an error occurred
  1050. */
  1051. static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
  1052. {
  1053. Mpeg4DecContext *ctx = (Mpeg4DecContext *)s;
  1054. int cbp, mb_type;
  1055. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  1056. mb_type = s->current_picture.mb_type[xy];
  1057. cbp = s->cbp_table[xy];
  1058. ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;
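/* Below intra_dc_threshold the intra DC is coded with the separate DC VLC;
 * at or above it the DC is coded together with the AC coefficients. */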
  1059. if (s->current_picture.qscale_table[xy] != s->qscale)
  1060. ff_set_qscale(s, s->current_picture.qscale_table[xy]);
  1061. if (s->pict_type == AV_PICTURE_TYPE_P ||
  1062. s->pict_type == AV_PICTURE_TYPE_S) {
  1063. int i;
  1064. for (i = 0; i < 4; i++) {
  1065. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
  1066. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
  1067. }
  1068. s->mb_intra = IS_INTRA(mb_type);
  1069. if (IS_SKIP(mb_type)) {
  1070. /* skip mb */
  1071. for (i = 0; i < 6; i++)
  1072. s->block_last_index[i] = -1;
  1073. s->mv_dir = MV_DIR_FORWARD;
  1074. s->mv_type = MV_TYPE_16X16;
  1075. if (s->pict_type == AV_PICTURE_TYPE_S
  1076. && ctx->vol_sprite_usage == GMC_SPRITE) {
  1077. s->mcsel = 1;
  1078. s->mb_skipped = 0;
  1079. } else {
  1080. s->mcsel = 0;
  1081. s->mb_skipped = 1;
  1082. }
  1083. } else if (s->mb_intra) {
  1084. s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
  1085. } else if (!s->mb_intra) {
  1086. // s->mcsel = 0; // FIXME do we need to init that?
  1087. s->mv_dir = MV_DIR_FORWARD;
  1088. if (IS_8X8(mb_type)) {
  1089. s->mv_type = MV_TYPE_8X8;
  1090. } else {
  1091. s->mv_type = MV_TYPE_16X16;
  1092. }
  1093. }
  1094. } else { /* I-Frame */
  1095. s->mb_intra = 1;
  1096. s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
  1097. }
  1098. if (!IS_SKIP(mb_type)) {
  1099. int i;
  1100. s->bdsp.clear_blocks(s->block[0]);
  1101. /* decode each block */
  1102. for (i = 0; i < 6; i++) {
  1103. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, s->mb_intra, ctx->rvlc) < 0) {
  1104. av_log(s->avctx, AV_LOG_ERROR,
  1105. "texture corrupted at %d %d %d\n",
  1106. s->mb_x, s->mb_y, s->mb_intra);
  1107. return -1;
  1108. }
  1109. cbp += cbp;
  1110. }
  1111. }
  1112. /* per-MB end of slice check */
  1113. if (--s->mb_num_left <= 0) {
  1114. if (mpeg4_is_resync(s))
  1115. return SLICE_END;
  1116. else
  1117. return SLICE_NOEND;
  1118. } else {
  1119. if (mpeg4_is_resync(s)) {
  1120. const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
  1121. if (s->cbp_table[xy + delta])
  1122. return SLICE_END;
  1123. }
  1124. return SLICE_OK;
  1125. }
  1126. }
  1127. static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
  1128. {
  1129. Mpeg4DecContext *ctx = (Mpeg4DecContext *)s;
  1130. int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
  1131. int16_t *mot_val;
  1132. static int8_t quant_tab[4] = { -1, -2, 1, 2 };
  1133. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  1134. assert(s->h263_pred);
  1135. if (s->pict_type == AV_PICTURE_TYPE_P ||
  1136. s->pict_type == AV_PICTURE_TYPE_S) {
  1137. do {
  1138. if (get_bits1(&s->gb)) {
  1139. /* skip mb */
  1140. s->mb_intra = 0;
  1141. for (i = 0; i < 6; i++)
  1142. s->block_last_index[i] = -1;
  1143. s->mv_dir = MV_DIR_FORWARD;
  1144. s->mv_type = MV_TYPE_16X16;
  1145. if (s->pict_type == AV_PICTURE_TYPE_S &&
  1146. ctx->vol_sprite_usage == GMC_SPRITE) {
  1147. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  1148. MB_TYPE_GMC |
  1149. MB_TYPE_16x16 |
  1150. MB_TYPE_L0;
  1151. s->mcsel = 1;
  1152. s->mv[0][0][0] = get_amv(ctx, 0);
  1153. s->mv[0][0][1] = get_amv(ctx, 1);
  1154. s->mb_skipped = 0;
  1155. } else {
  1156. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  1157. MB_TYPE_16x16 |
  1158. MB_TYPE_L0;
  1159. s->mcsel = 0;
  1160. s->mv[0][0][0] = 0;
  1161. s->mv[0][0][1] = 0;
  1162. s->mb_skipped = 1;
  1163. }
  1164. goto end;
  1165. }
  1166. cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
  1167. if (cbpc < 0) {
  1168. av_log(s->avctx, AV_LOG_ERROR,
  1169. "cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
  1170. return -1;
  1171. }
  1172. } while (cbpc == 20);
  1173. s->bdsp.clear_blocks(s->block[0]);
  1174. dquant = cbpc & 8;
  1175. s->mb_intra = ((cbpc & 4) != 0);
  1176. if (s->mb_intra)
  1177. goto intra;
  1178. if (s->pict_type == AV_PICTURE_TYPE_S &&
  1179. ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0)
  1180. s->mcsel = get_bits1(&s->gb);
  1181. else
  1182. s->mcsel = 0;
  1183. cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F;
  1184. cbp = (cbpc & 3) | (cbpy << 2);
  1185. if (dquant)
  1186. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  1187. if ((!s->progressive_sequence) &&
  1188. (cbp || (s->workaround_bugs & FF_BUG_XVID_ILACE)))
  1189. s->interlaced_dct = get_bits1(&s->gb);
  1190. s->mv_dir = MV_DIR_FORWARD;
  1191. if ((cbpc & 16) == 0) {
  1192. if (s->mcsel) {
  1193. s->current_picture.mb_type[xy] = MB_TYPE_GMC |
  1194. MB_TYPE_16x16 |
  1195. MB_TYPE_L0;
  1196. /* 16x16 global motion prediction */
  1197. s->mv_type = MV_TYPE_16X16;
  1198. mx = get_amv(ctx, 0);
  1199. my = get_amv(ctx, 1);
  1200. s->mv[0][0][0] = mx;
  1201. s->mv[0][0][1] = my;
  1202. } else if ((!s->progressive_sequence) && get_bits1(&s->gb)) {
  1203. s->current_picture.mb_type[xy] = MB_TYPE_16x8 |
  1204. MB_TYPE_L0 |
  1205. MB_TYPE_INTERLACED;
  1206. /* 16x8 field motion prediction */
  1207. s->mv_type = MV_TYPE_FIELD;
  1208. s->field_select[0][0] = get_bits1(&s->gb);
  1209. s->field_select[0][1] = get_bits1(&s->gb);
  1210. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  1211. for (i = 0; i < 2; i++) {
  1212. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  1213. if (mx >= 0xffff)
  1214. return -1;
  1215. my = ff_h263_decode_motion(s, pred_y / 2, s->f_code);
  1216. if (my >= 0xffff)
  1217. return -1;
  1218. s->mv[0][i][0] = mx;
  1219. s->mv[0][i][1] = my;
  1220. }
  1221. } else {
  1222. s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
  1223. /* 16x16 motion prediction */
  1224. s->mv_type = MV_TYPE_16X16;
  1225. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  1226. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  1227. if (mx >= 0xffff)
  1228. return -1;
  1229. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  1230. if (my >= 0xffff)
  1231. return -1;
  1232. s->mv[0][0][0] = mx;
  1233. s->mv[0][0][1] = my;
  1234. }
  1235. } else {
  1236. s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
  1237. s->mv_type = MV_TYPE_8X8;
  1238. for (i = 0; i < 4; i++) {
  1239. mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  1240. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  1241. if (mx >= 0xffff)
  1242. return -1;
  1243. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  1244. if (my >= 0xffff)
  1245. return -1;
  1246. s->mv[0][i][0] = mx;
  1247. s->mv[0][i][1] = my;
  1248. mot_val[0] = mx;
  1249. mot_val[1] = my;
  1250. }
  1251. }
  1252. } else if (s->pict_type == AV_PICTURE_TYPE_B) {
  1253. int modb1; // first bit of modb
  1254. int modb2; // second bit of modb
  1255. int mb_type;
  1256. s->mb_intra = 0; // B-frames never contain intra blocks
  1257. s->mcsel = 0; // ... true gmc blocks
  1258. if (s->mb_x == 0) {
  1259. for (i = 0; i < 2; i++) {
  1260. s->last_mv[i][0][0] =
  1261. s->last_mv[i][0][1] =
  1262. s->last_mv[i][1][0] =
  1263. s->last_mv[i][1][1] = 0;
  1264. }
  1265. ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
  1266. }
/* if we skipped it in the future P-frame then skip it now too */
  1268. s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
  1269. if (s->mb_skipped) {
  1270. /* skip mb */
  1271. for (i = 0; i < 6; i++)
  1272. s->block_last_index[i] = -1;
  1273. s->mv_dir = MV_DIR_FORWARD;
  1274. s->mv_type = MV_TYPE_16X16;
  1275. s->mv[0][0][0] =
  1276. s->mv[0][0][1] =
  1277. s->mv[1][0][0] =
  1278. s->mv[1][0][1] = 0;
  1279. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  1280. MB_TYPE_16x16 |
  1281. MB_TYPE_L0;
  1282. goto end;
  1283. }
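/* modb is coded as '1', '01' or '00': '1' means direct mode with nothing else
 * coded, '01' means mb_type follows but cbp is zero, '00' means both mb_type
 * and cbp follow. */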
  1284. modb1 = get_bits1(&s->gb);
  1285. if (modb1) {
  1286. // like MB_TYPE_B_DIRECT but no vectors coded
  1287. mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1;
  1288. cbp = 0;
  1289. } else {
  1290. modb2 = get_bits1(&s->gb);
  1291. mb_type = get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1);
  1292. if (mb_type < 0) {
  1293. av_log(s->avctx, AV_LOG_ERROR, "illegal MB_type\n");
  1294. return -1;
  1295. }
  1296. mb_type = mb_type_b_map[mb_type];
  1297. if (modb2) {
  1298. cbp = 0;
  1299. } else {
  1300. s->bdsp.clear_blocks(s->block[0]);
  1301. cbp = get_bits(&s->gb, 6);
  1302. }
  1303. if ((!IS_DIRECT(mb_type)) && cbp) {
  1304. if (get_bits1(&s->gb))
  1305. ff_set_qscale(s, s->qscale + get_bits1(&s->gb) * 4 - 2);
  1306. }
  1307. if (!s->progressive_sequence) {
  1308. if (cbp)
  1309. s->interlaced_dct = get_bits1(&s->gb);
  1310. if (!IS_DIRECT(mb_type) && get_bits1(&s->gb)) {
  1311. mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
  1312. mb_type &= ~MB_TYPE_16x16;
  1313. if (USES_LIST(mb_type, 0)) {
  1314. s->field_select[0][0] = get_bits1(&s->gb);
  1315. s->field_select[0][1] = get_bits1(&s->gb);
  1316. }
  1317. if (USES_LIST(mb_type, 1)) {
  1318. s->field_select[1][0] = get_bits1(&s->gb);
  1319. s->field_select[1][1] = get_bits1(&s->gb);
  1320. }
  1321. }
  1322. }
  1323. s->mv_dir = 0;
  1324. if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) {
  1325. s->mv_type = MV_TYPE_16X16;
  1326. if (USES_LIST(mb_type, 0)) {
  1327. s->mv_dir = MV_DIR_FORWARD;
  1328. mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
  1329. my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
  1330. s->last_mv[0][1][0] =
  1331. s->last_mv[0][0][0] =
  1332. s->mv[0][0][0] = mx;
  1333. s->last_mv[0][1][1] =
  1334. s->last_mv[0][0][1] =
  1335. s->mv[0][0][1] = my;
  1336. }
  1337. if (USES_LIST(mb_type, 1)) {
  1338. s->mv_dir |= MV_DIR_BACKWARD;
  1339. mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
  1340. my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
  1341. s->last_mv[1][1][0] =
  1342. s->last_mv[1][0][0] =
  1343. s->mv[1][0][0] = mx;
  1344. s->last_mv[1][1][1] =
  1345. s->last_mv[1][0][1] =
  1346. s->mv[1][0][1] = my;
  1347. }
  1348. } else if (!IS_DIRECT(mb_type)) {
  1349. s->mv_type = MV_TYPE_FIELD;
  1350. if (USES_LIST(mb_type, 0)) {
  1351. s->mv_dir = MV_DIR_FORWARD;
  1352. for (i = 0; i < 2; i++) {
  1353. mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], s->f_code);
  1354. my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, s->f_code);
  1355. s->last_mv[0][i][0] =
  1356. s->mv[0][i][0] = mx;
  1357. s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2;
  1358. }
  1359. }
  1360. if (USES_LIST(mb_type, 1)) {
  1361. s->mv_dir |= MV_DIR_BACKWARD;
  1362. for (i = 0; i < 2; i++) {
  1363. mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], s->b_code);
  1364. my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, s->b_code);
  1365. s->last_mv[1][i][0] =
  1366. s->mv[1][i][0] = mx;
  1367. s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2;
  1368. }
  1369. }
  1370. }
  1371. }
  1372. if (IS_DIRECT(mb_type)) {
  1373. if (IS_SKIP(mb_type)) {
  1374. mx =
  1375. my = 0;
  1376. } else {
  1377. mx = ff_h263_decode_motion(s, 0, 1);
  1378. my = ff_h263_decode_motion(s, 0, 1);
  1379. }
  1380. s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
  1381. mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
  1382. }
  1383. s->current_picture.mb_type[xy] = mb_type;
  1384. } else { /* I-Frame */
  1385. do {
  1386. cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
  1387. if (cbpc < 0) {
  1388. av_log(s->avctx, AV_LOG_ERROR,
  1389. "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
  1390. return -1;
  1391. }
  1392. } while (cbpc == 8);
  1393. dquant = cbpc & 4;
  1394. s->mb_intra = 1;
  1395. intra:
  1396. s->ac_pred = get_bits1(&s->gb);
  1397. if (s->ac_pred)
  1398. s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
  1399. else
  1400. s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
  1401. cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  1402. if (cbpy < 0) {
  1403. av_log(s->avctx, AV_LOG_ERROR,
  1404. "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
  1405. return -1;
  1406. }
  1407. cbp = (cbpc & 3) | (cbpy << 2);
  1408. ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;
  1409. if (dquant)
  1410. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  1411. if (!s->progressive_sequence)
  1412. s->interlaced_dct = get_bits1(&s->gb);
  1413. s->bdsp.clear_blocks(s->block[0]);
  1414. /* decode each block */
  1415. for (i = 0; i < 6; i++) {
  1416. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 1, 0) < 0)
  1417. return -1;
  1418. cbp += cbp;
  1419. }
  1420. goto end;
  1421. }
  1422. /* decode each block */
  1423. for (i = 0; i < 6; i++) {
  1424. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 0, 0) < 0)
  1425. return -1;
  1426. cbp += cbp;
  1427. }
  1428. end:
  1429. /* per-MB end of slice check */
  1430. if (s->codec_id == AV_CODEC_ID_MPEG4) {
  1431. if (mpeg4_is_resync(s)) {
  1432. const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
  1433. if (s->pict_type == AV_PICTURE_TYPE_B &&
  1434. s->next_picture.mbskip_table[xy + delta]) {
  1435. ff_thread_await_progress(&s->next_picture_ptr->tf,
  1436. (s->mb_x + delta >= s->mb_width)
  1437. ? FFMIN(s->mb_y + 1, s->mb_height - 1)
  1438. : s->mb_y, 0);
  1439. }
  1440. if (s->pict_type == AV_PICTURE_TYPE_B &&
  1441. s->next_picture.mbskip_table[xy + delta])
  1442. return SLICE_OK;
  1443. return SLICE_END;
  1444. }
  1445. }
  1446. return SLICE_OK;
  1447. }
  1448. static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb)
  1449. {
  1450. int hours, minutes, seconds;
  1451. unsigned time_code = show_bits(gb, 18);
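/* time_code layout: 5 bits hours, 6 bits minutes, marker bit, 6 bits seconds */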
  1452. if (time_code & 0x40) { /* marker_bit */
  1453. hours = time_code >> 13;
  1454. minutes = time_code >> 7 & 0x3f;
  1455. seconds = time_code & 0x3f;
  1456. s->time_base = seconds + 60 * (minutes + 60 * hours);
  1457. skip_bits(gb, 20); /* time_code, closed_gov, broken_link */
  1458. } else {
  1459. av_log(s->avctx, AV_LOG_WARNING, "GOP header missing marker_bit\n");
  1460. }
  1461. return 0;
  1462. }
  1463. static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb)
  1464. {
  1465. int profile_and_level_indication;
  1466. profile_and_level_indication = get_bits(gb, 8);
  1467. s->avctx->profile = (profile_and_level_indication & 0xf0) >> 4;
  1468. s->avctx->level = (profile_and_level_indication & 0x0f);
// Simple Profile signals Level 0 with the value 8
  1470. if (s->avctx->profile == 0 && s->avctx->level == 8) {
  1471. s->avctx->level = 0;
  1472. }
  1473. return 0;
  1474. }
  1475. static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  1476. {
  1477. MpegEncContext *s = &ctx->m;
  1478. int width, height, vo_ver_id;
  1479. /* vol header */
  1480. skip_bits(gb, 1); /* random access */
  1481. s->vo_type = get_bits(gb, 8);
  1482. if (get_bits1(gb) != 0) { /* is_ol_id */
  1483. vo_ver_id = get_bits(gb, 4); /* vo_ver_id */
  1484. skip_bits(gb, 3); /* vo_priority */
  1485. } else {
  1486. vo_ver_id = 1;
  1487. }
  1488. s->aspect_ratio_info = get_bits(gb, 4);
  1489. if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
  1490. s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
  1491. s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
  1492. } else {
  1493. s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
  1494. }
  1495. if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
  1496. int chroma_format = get_bits(gb, 2);
  1497. if (chroma_format != CHROMA_420)
  1498. av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");
  1499. s->low_delay = get_bits1(gb);
  1500. if (get_bits1(gb)) { /* vbv parameters */
  1501. get_bits(gb, 15); /* first_half_bitrate */
  1502. skip_bits1(gb); /* marker */
  1503. get_bits(gb, 15); /* latter_half_bitrate */
  1504. skip_bits1(gb); /* marker */
  1505. get_bits(gb, 15); /* first_half_vbv_buffer_size */
  1506. skip_bits1(gb); /* marker */
  1507. get_bits(gb, 3); /* latter_half_vbv_buffer_size */
  1508. get_bits(gb, 11); /* first_half_vbv_occupancy */
  1509. skip_bits1(gb); /* marker */
  1510. get_bits(gb, 15); /* latter_half_vbv_occupancy */
  1511. skip_bits1(gb); /* marker */
  1512. }
  1513. } else {
  1514. /* is setting low delay flag only once the smartest thing to do?
  1515. * low delay detection will not be overridden. */
  1516. if (s->picture_number == 0)
  1517. s->low_delay = 0;
  1518. }
  1519. ctx->shape = get_bits(gb, 2); /* vol shape */
  1520. if (ctx->shape != RECT_SHAPE)
  1521. av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n");
  1522. if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) {
  1523. av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n");
  1524. skip_bits(gb, 4); /* video_object_layer_shape_extension */
  1525. }
  1526. check_marker(s->avctx, gb, "before time_increment_resolution");
  1527. s->avctx->framerate.num = get_bits(gb, 16);
  1528. if (!s->avctx->framerate.num) {
  1529. av_log(s->avctx, AV_LOG_ERROR, "framerate==0\n");
  1530. return -1;
  1531. }
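/* number of bits needed to code 0 .. vop_time_increment_resolution - 1 */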
  1532. ctx->time_increment_bits = av_log2(s->avctx->framerate.num - 1) + 1;
  1533. if (ctx->time_increment_bits < 1)
  1534. ctx->time_increment_bits = 1;
  1535. check_marker(s->avctx, gb, "before fixed_vop_rate");
  1536. if (get_bits1(gb) != 0) /* fixed_vop_rate */
  1537. s->avctx->framerate.den = get_bits(gb, ctx->time_increment_bits);
  1538. else
  1539. s->avctx->framerate.den = 1;
  1540. ctx->t_frame = 0;
  1541. if (ctx->shape != BIN_ONLY_SHAPE) {
  1542. if (ctx->shape == RECT_SHAPE) {
  1543. skip_bits1(gb); /* marker */
  1544. width = get_bits(gb, 13);
  1545. skip_bits1(gb); /* marker */
  1546. height = get_bits(gb, 13);
  1547. skip_bits1(gb); /* marker */
  1548. if (width && height && /* they should be non zero but who knows */
  1549. !(s->width && s->codec_tag == AV_RL32("MP4S"))) {
  1550. if (s->width && s->height &&
  1551. (s->width != width || s->height != height))
  1552. s->context_reinit = 1;
  1553. s->width = width;
  1554. s->height = height;
  1555. }
  1556. }
  1557. s->progressive_sequence =
  1558. s->progressive_frame = get_bits1(gb) ^ 1;
  1559. s->interlaced_dct = 0;
  1560. if (!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO))
  1561. av_log(s->avctx, AV_LOG_INFO, /* OBMC Disable */
  1562. "MPEG-4 OBMC not supported (very likely buggy encoder)\n");
  1563. if (vo_ver_id == 1)
  1564. ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */
  1565. else
  1566. ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */
  1567. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  1568. av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n");
  1569. if (ctx->vol_sprite_usage == STATIC_SPRITE ||
  1570. ctx->vol_sprite_usage == GMC_SPRITE) {
  1571. if (ctx->vol_sprite_usage == STATIC_SPRITE) {
  1572. skip_bits(gb, 13); // sprite_width
  1573. skip_bits1(gb); /* marker */
  1574. skip_bits(gb, 13); // sprite_height
  1575. skip_bits1(gb); /* marker */
  1576. skip_bits(gb, 13); // sprite_left
  1577. skip_bits1(gb); /* marker */
  1578. skip_bits(gb, 13); // sprite_top
  1579. skip_bits1(gb); /* marker */
  1580. }
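/* MPEG-4 allows up to 4 warping points (perspective GMC); this decoder only
 * handles 0-3 points (translation, isotropic and affine warping). */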
  1581. ctx->num_sprite_warping_points = get_bits(gb, 6);
  1582. if (ctx->num_sprite_warping_points > 3) {
  1583. av_log(s->avctx, AV_LOG_ERROR,
  1584. "%d sprite_warping_points\n",
  1585. ctx->num_sprite_warping_points);
  1586. ctx->num_sprite_warping_points = 0;
  1587. return -1;
  1588. }
  1589. s->sprite_warping_accuracy = get_bits(gb, 2);
  1590. ctx->sprite_brightness_change = get_bits1(gb);
  1591. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  1592. skip_bits1(gb); // low_latency_sprite
  1593. }
  1594. // FIXME sadct disable bit if verid!=1 && shape not rect
  1595. if (get_bits1(gb) == 1) { /* not_8_bit */
  1596. s->quant_precision = get_bits(gb, 4); /* quant_precision */
  1597. if (get_bits(gb, 4) != 8) /* bits_per_pixel */
  1598. av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n");
  1599. if (s->quant_precision != 5)
  1600. av_log(s->avctx, AV_LOG_ERROR,
  1601. "quant precision %d\n", s->quant_precision);
  1602. } else {
  1603. s->quant_precision = 5;
  1604. }
  1605. // FIXME a bunch of grayscale shape things
  1606. if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */
  1607. int i, v;
1608. /* load default matrices */
  1609. for (i = 0; i < 64; i++) {
  1610. int j = s->idsp.idct_permutation[i];
  1611. v = ff_mpeg4_default_intra_matrix[i];
  1612. s->intra_matrix[j] = v;
  1613. s->chroma_intra_matrix[j] = v;
  1614. v = ff_mpeg4_default_non_intra_matrix[i];
  1615. s->inter_matrix[j] = v;
  1616. s->chroma_inter_matrix[j] = v;
  1617. }
  1618. /* load custom intra matrix */
  1619. if (get_bits1(gb)) {
  1620. int last = 0;
  1621. for (i = 0; i < 64; i++) {
  1622. int j;
  1623. v = get_bits(gb, 8);
  1624. if (v == 0)
  1625. break;
  1626. last = v;
  1627. j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1628. s->intra_matrix[j] = last;
  1629. s->chroma_intra_matrix[j] = last;
  1630. }
  1631. /* replicate last value */
  1632. for (; i < 64; i++) {
  1633. int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1634. s->intra_matrix[j] = last;
  1635. s->chroma_intra_matrix[j] = last;
  1636. }
  1637. }
  1638. /* load custom non intra matrix */
  1639. if (get_bits1(gb)) {
  1640. int last = 0;
  1641. for (i = 0; i < 64; i++) {
  1642. int j;
  1643. v = get_bits(gb, 8);
  1644. if (v == 0)
  1645. break;
  1646. last = v;
  1647. j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1648. s->inter_matrix[j] = v;
  1649. s->chroma_inter_matrix[j] = v;
  1650. }
  1651. /* replicate last value */
  1652. for (; i < 64; i++) {
  1653. int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1654. s->inter_matrix[j] = last;
  1655. s->chroma_inter_matrix[j] = last;
  1656. }
  1657. }
  1658. // FIXME a bunch of grayscale shape things
  1659. }
  1660. if (vo_ver_id != 1)
  1661. s->quarter_sample = get_bits1(gb);
  1662. else
  1663. s->quarter_sample = 0;
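/* complexity_estimation_disable == 0: the VOL defines which per-VOP complexity
 * statistics are present.  The statistics themselves are not used here; only the
 * number of bits to skip in each VOP type (cplx_estimation_trash_i/p/b) is kept. */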
  1664. if (!get_bits1(gb)) {
  1665. int pos = get_bits_count(gb);
  1666. int estimation_method = get_bits(gb, 2);
  1667. if (estimation_method < 2) {
  1668. if (!get_bits1(gb)) {
  1669. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */
  1670. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */
  1671. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */
  1672. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */
  1673. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */
  1674. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upsampling */
  1675. }
  1676. if (!get_bits1(gb)) {
  1677. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */
  1678. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */
  1679. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */
  1680. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */
  1681. }
  1682. if (!check_marker(s->avctx, gb, "in complexity estimation part 1")) {
  1683. skip_bits_long(gb, pos - get_bits_count(gb));
  1684. goto no_cplx_est;
  1685. }
  1686. if (!get_bits1(gb)) {
  1687. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */
  1688. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */
  1689. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */
  1690. ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */
  1691. }
  1692. if (!get_bits1(gb)) {
  1693. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */
  1694. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */
  1695. ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */
  1696. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */
  1697. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */
  1698. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */
  1699. }
  1700. if (!check_marker(s->avctx, gb, "in complexity estimation part 2")) {
  1701. skip_bits_long(gb, pos - get_bits_count(gb));
  1702. goto no_cplx_est;
  1703. }
  1704. if (estimation_method == 1) {
  1705. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */
  1706. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */
  1707. }
  1708. } else
  1709. av_log(s->avctx, AV_LOG_ERROR,
  1710. "Invalid Complexity estimation method %d\n",
  1711. estimation_method);
  1712. } else {
  1713. no_cplx_est:
  1714. ctx->cplx_estimation_trash_i =
  1715. ctx->cplx_estimation_trash_p =
  1716. ctx->cplx_estimation_trash_b = 0;
  1717. }
  1718. ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */
  1719. s->data_partitioning = get_bits1(gb);
  1720. if (s->data_partitioning)
  1721. ctx->rvlc = get_bits1(gb);
  1722. if (vo_ver_id != 1) {
  1723. ctx->new_pred = get_bits1(gb);
  1724. if (ctx->new_pred) {
  1725. av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n");
  1726. skip_bits(gb, 2); /* requested upstream message type */
  1727. skip_bits1(gb); /* newpred segment type */
  1728. }
  1729. if (get_bits1(gb)) // reduced_res_vop
  1730. av_log(s->avctx, AV_LOG_ERROR,
  1731. "reduced resolution VOP not supported\n");
  1732. } else {
  1733. ctx->new_pred = 0;
  1734. }
  1735. ctx->scalability = get_bits1(gb);
  1736. if (ctx->scalability) {
  1737. GetBitContext bak = *gb;
  1738. int h_sampling_factor_n;
  1739. int h_sampling_factor_m;
  1740. int v_sampling_factor_n;
  1741. int v_sampling_factor_m;
  1742. skip_bits1(gb); // hierarchy_type
  1743. skip_bits(gb, 4); /* ref_layer_id */
  1744. skip_bits1(gb); /* ref_layer_sampling_dir */
  1745. h_sampling_factor_n = get_bits(gb, 5);
  1746. h_sampling_factor_m = get_bits(gb, 5);
  1747. v_sampling_factor_n = get_bits(gb, 5);
  1748. v_sampling_factor_m = get_bits(gb, 5);
  1749. ctx->enhancement_type = get_bits1(gb);
  1750. if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
  1751. v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
1752. /* illegal scalability header (VERY broken encoder),
1753. * trying to work around it */
  1754. ctx->scalability = 0;
  1755. *gb = bak;
  1756. } else
  1757. av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n");
  1758. // bin shape stuff FIXME
  1759. }
  1760. }
  1761. return 0;
  1762. }
  1763. /**
1764. * Decode the user data in the header.
1765. * Also detects the DivX/Xvid/libavcodec version and build.
  1766. */
  1767. static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
  1768. {
  1769. MpegEncContext *s = &ctx->m;
  1770. char buf[256];
  1771. int i;
  1772. int e;
  1773. int ver = 0, build = 0, ver2 = 0, ver3 = 0;
  1774. char last;
  1775. for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) {
  1776. if (show_bits(gb, 23) == 0)
  1777. break;
  1778. buf[i] = get_bits(gb, 8);
  1779. }
  1780. buf[i] = 0;
  1781. /* divx detection */
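/* Typical user data looks like "DivX503Build1234p" or "DivX503b1234p";
 * a trailing 'p' marks vfw-avi "packed bitstream" files in which a reference
 * VOP and the following B-VOP are stored in the same chunk. */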
  1782. e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
  1783. if (e < 2)
  1784. e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
  1785. if (e >= 2) {
  1786. ctx->divx_version = ver;
  1787. ctx->divx_build = build;
  1788. s->divx_packed = e == 3 && last == 'p';
  1789. if (s->divx_packed && !ctx->showed_packed_warning) {
  1790. av_log(s->avctx, AV_LOG_WARNING,
  1791. "Invalid and inefficient vfw-avi packed B-frames detected\n");
  1792. ctx->showed_packed_warning = 1;
  1793. }
  1794. }
  1795. /* libavcodec detection */
  1796. e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
  1797. if (e != 4)
  1798. e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
  1799. if (e != 4) {
  1800. e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
  1801. if (e > 1)
  1802. build = (ver << 16) + (ver2 << 8) + ver3;
  1803. }
  1804. if (e != 4) {
  1805. if (strcmp(buf, "ffmpeg") == 0)
  1806. ctx->lavc_build = 4600;
  1807. }
  1808. if (e == 4)
  1809. ctx->lavc_build = build;
  1810. /* Xvid detection */
  1811. e = sscanf(buf, "XviD%d", &build);
  1812. if (e == 1)
  1813. ctx->xvid_build = build;
  1814. if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) {
  1815. if (s->codec_tag == AV_RL32("XVID") ||
  1816. s->codec_tag == AV_RL32("XVIX") ||
  1817. s->codec_tag == AV_RL32("RMP4") ||
  1818. s->codec_tag == AV_RL32("ZMP4") ||
  1819. s->codec_tag == AV_RL32("SIPP"))
  1820. ctx->xvid_build = 0;
  1821. }
  1822. if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1)
  1823. if (s->codec_tag == AV_RL32("DIVX") && s->vo_type == 0 &&
  1824. ctx->vol_control_parameters == 0)
  1825. ctx->divx_version = 400; // divx 4
  1826. if (ctx->xvid_build >= 0 && ctx->divx_version >= 0) {
  1827. ctx->divx_version =
  1828. ctx->divx_build = -1;
  1829. }
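/* For streams identified as Xvid, use Xvid's own IDCT so the inverse transform
 * matches the one used by the encoder. */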
  1830. if (CONFIG_MPEG4_DECODER && ctx->xvid_build >= 0)
  1831. ff_xvid_idct_init(&s->idsp, s->avctx);
  1832. return 0;
  1833. }
  1834. static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  1835. {
  1836. MpegEncContext *s = &ctx->m;
  1837. int time_incr, time_increment;
1838. s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0, P = 1 */
  1839. if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay &&
  1840. ctx->vol_control_parameters == 0 && !(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)) {
  1841. av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n");
  1842. s->low_delay = 0;
  1843. }
  1844. s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;
  1845. if (s->partitioned_frame)
  1846. s->decode_mb = mpeg4_decode_partitioned_mb;
  1847. else
  1848. s->decode_mb = mpeg4_decode_mb;
  1849. time_incr = 0;
  1850. while (get_bits1(gb) != 0)
  1851. time_incr++;
  1852. check_marker(s->avctx, gb, "before time_increment");
  1853. if (ctx->time_increment_bits == 0 ||
  1854. !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) {
  1855. /* Headers seem incomplete; try to guess time_increment_bits. */
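/* Try successively wider fields until the bits that should follow the
 * increment (marker, vop_coded and the first VOP fields) form a plausible pattern. */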
  1856. for (ctx->time_increment_bits = 1;
  1857. ctx->time_increment_bits < 16;
  1858. ctx->time_increment_bits++) {
  1859. if (s->pict_type == AV_PICTURE_TYPE_P ||
  1860. (s->pict_type == AV_PICTURE_TYPE_S &&
  1861. ctx->vol_sprite_usage == GMC_SPRITE)) {
  1862. if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
  1863. break;
  1864. } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)
  1865. break;
  1866. }
  1867. }
  1868. if (IS_3IV1)
  1869. time_increment = get_bits1(gb); // FIXME investigate further
  1870. else
  1871. time_increment = get_bits(gb, ctx->time_increment_bits);
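/* pp_time is the distance between the two most recent reference (non-B) VOPs;
 * pb_time is the distance from the older of those references to the current
 * B-VOP.  Both are later used to scale motion vectors for direct mode. */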
  1872. if (s->pict_type != AV_PICTURE_TYPE_B) {
  1873. s->last_time_base = s->time_base;
  1874. s->time_base += time_incr;
  1875. s->time = s->time_base * s->avctx->framerate.num + time_increment;
  1876. if (s->workaround_bugs & FF_BUG_UMP4) {
  1877. if (s->time < s->last_non_b_time) {
1878. /* header is not MPEG-4-compatible, broken encoder,
1879. * trying to work around it */
  1880. s->time_base++;
  1881. s->time += s->avctx->framerate.num;
  1882. }
  1883. }
  1884. s->pp_time = s->time - s->last_non_b_time;
  1885. s->last_non_b_time = s->time;
  1886. } else {
  1887. s->time = (s->last_time_base + time_incr) * s->avctx->framerate.num + time_increment;
  1888. s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
  1889. if (s->pp_time <= s->pb_time ||
  1890. s->pp_time <= s->pp_time - s->pb_time ||
  1891. s->pp_time <= 0) {
1892. /* messed-up temporal order, possibly after a seek; skip the current B-frame */
  1893. return FRAME_SKIPPED;
  1894. }
  1895. ff_mpeg4_init_direct_mv(s);
  1896. if (ctx->t_frame == 0)
  1897. ctx->t_frame = s->pb_time;
  1898. if (ctx->t_frame == 0)
  1899. ctx->t_frame = 1; // 1/0 protection
  1900. s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) -
  1901. ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
  1902. s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) -
  1903. ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
  1904. if (!s->progressive_sequence) {
  1905. if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1)
  1906. return FRAME_SKIPPED;
  1907. }
  1908. }
  1909. check_marker(s->avctx, gb, "before vop_coded");
  1910. /* vop coded */
  1911. if (get_bits1(gb) != 1) {
  1912. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1913. av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n");
  1914. return FRAME_SKIPPED;
  1915. }
  1916. if (ctx->shape != BIN_ONLY_SHAPE &&
  1917. (s->pict_type == AV_PICTURE_TYPE_P ||
  1918. (s->pict_type == AV_PICTURE_TYPE_S &&
  1919. ctx->vol_sprite_usage == GMC_SPRITE))) {
  1920. /* rounding type for motion estimation */
  1921. s->no_rounding = get_bits1(gb);
  1922. } else {
  1923. s->no_rounding = 0;
  1924. }
  1925. // FIXME reduced res stuff
  1926. if (ctx->shape != RECT_SHAPE) {
  1927. if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
  1928. skip_bits(gb, 13); /* width */
  1929. skip_bits1(gb); /* marker */
  1930. skip_bits(gb, 13); /* height */
  1931. skip_bits1(gb); /* marker */
  1932. skip_bits(gb, 13); /* hor_spat_ref */
  1933. skip_bits1(gb); /* marker */
  1934. skip_bits(gb, 13); /* ver_spat_ref */
  1935. }
  1936. skip_bits1(gb); /* change_CR_disable */
  1937. if (get_bits1(gb) != 0)
  1938. skip_bits(gb, 8); /* constant_alpha_value */
  1939. }
  1940. // FIXME complexity estimation stuff
  1941. if (ctx->shape != BIN_ONLY_SHAPE) {
  1942. skip_bits_long(gb, ctx->cplx_estimation_trash_i);
  1943. if (s->pict_type != AV_PICTURE_TYPE_I)
  1944. skip_bits_long(gb, ctx->cplx_estimation_trash_p);
  1945. if (s->pict_type == AV_PICTURE_TYPE_B)
  1946. skip_bits_long(gb, ctx->cplx_estimation_trash_b);
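/* intra_dc_vlc_thr: qscale threshold at or above which intra DC coefficients
 * are coded with the intra AC VLC instead of the separate DC VLC. */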
  1947. ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)];
  1948. if (!s->progressive_sequence) {
  1949. s->top_field_first = get_bits1(gb);
  1950. s->alternate_scan = get_bits1(gb);
  1951. } else
  1952. s->alternate_scan = 0;
  1953. }
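/* With alternate_scan (interlaced material) the alternate vertical scan is used
 * for all blocks; otherwise the zigzag scan is used, with the alternate
 * horizontal/vertical scans selected for AC-predicted intra blocks by the
 * prediction direction. */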
  1954. if (s->alternate_scan) {
  1955. ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
  1956. ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
  1957. ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan);
  1958. ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  1959. } else {
  1960. ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
  1961. ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
  1962. ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
  1963. ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  1964. }
  1965. if (s->pict_type == AV_PICTURE_TYPE_S &&
  1966. (ctx->vol_sprite_usage == STATIC_SPRITE ||
  1967. ctx->vol_sprite_usage == GMC_SPRITE)) {
  1968. if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0)
  1969. return AVERROR_INVALIDDATA;
  1970. if (ctx->sprite_brightness_change)
  1971. av_log(s->avctx, AV_LOG_ERROR,
  1972. "sprite_brightness_change not supported\n");
  1973. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  1974. av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
  1975. }
  1976. if (ctx->shape != BIN_ONLY_SHAPE) {
  1977. s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision);
  1978. if (s->qscale == 0) {
  1979. av_log(s->avctx, AV_LOG_ERROR,
  1980. "Error, header damaged or not MPEG-4 header (qscale=0)\n");
1981. return -1; // no point in continuing, nothing of the image can be decoded
  1982. }
  1983. if (s->pict_type != AV_PICTURE_TYPE_I) {
  1984. s->f_code = get_bits(gb, 3); /* fcode_for */
  1985. if (s->f_code == 0) {
  1986. av_log(s->avctx, AV_LOG_ERROR,
  1987. "Error, header damaged or not MPEG-4 header (f_code=0)\n");
1988. return -1; // no point in continuing, nothing of the image can be decoded
  1989. }
  1990. } else
  1991. s->f_code = 1;
  1992. if (s->pict_type == AV_PICTURE_TYPE_B) {
  1993. s->b_code = get_bits(gb, 3);
  1994. } else
  1995. s->b_code = 1;
  1996. if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
  1997. av_log(s->avctx, AV_LOG_DEBUG,
  1998. "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d\n",
  1999. s->qscale, s->f_code, s->b_code,
  2000. s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
  2001. gb->size_in_bits, s->progressive_sequence, s->alternate_scan,
  2002. s->top_field_first, s->quarter_sample ? "q" : "h",
  2003. s->data_partitioning, ctx->resync_marker,
  2004. ctx->num_sprite_warping_points, s->sprite_warping_accuracy,
  2005. 1 - s->no_rounding, s->vo_type,
  2006. ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
  2007. ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
  2008. ctx->cplx_estimation_trash_b);
  2009. }
  2010. if (!ctx->scalability) {
  2011. if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I)
  2012. skip_bits1(gb); // vop shape coding type
  2013. } else {
  2014. if (ctx->enhancement_type) {
  2015. int load_backward_shape = get_bits1(gb);
  2016. if (load_backward_shape)
  2017. av_log(s->avctx, AV_LOG_ERROR,
  2018. "load backward shape isn't supported\n");
  2019. }
  2020. skip_bits(gb, 2); // ref_select_code
  2021. }
  2022. }
2023. /* detect buggy encoders which don't set the low_delay flag
2024. * (divx4/xvid/opendivx). Note that divx5 cannot easily be detected
2025. * without B-frames (although it is buggy too) */
  2026. if (s->vo_type == 0 && ctx->vol_control_parameters == 0 &&
  2027. ctx->divx_version == -1 && s->picture_number == 0) {
  2028. av_log(s->avctx, AV_LOG_WARNING,
  2029. "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
  2030. s->low_delay = 1;
  2031. }
  2032. s->picture_number++; // better than pic number==0 always ;)
  2033. // FIXME add short header support
  2034. s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
  2035. s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
  2036. if (s->workaround_bugs & FF_BUG_EDGE) {
  2037. s->h_edge_pos = s->width;
  2038. s->v_edge_pos = s->height;
  2039. }
  2040. return 0;
  2041. }
  2042. /**
  2043. * Decode MPEG-4 headers.
  2044. * @return <0 if no VOP found (or a damaged one)
  2045. * FRAME_SKIPPED if a not coded VOP is found
  2046. * 0 if a VOP is found
  2047. */
  2048. int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  2049. {
  2050. MpegEncContext *s = &ctx->m;
  2051. unsigned startcode, v;
  2052. /* search next start code */
  2053. align_get_bits(gb);
  2054. if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) {
  2055. skip_bits(gb, 24);
  2056. if (get_bits(gb, 8) == 0xF0)
  2057. goto end;
  2058. }
  2059. startcode = 0xff;
  2060. for (;;) {
  2061. if (get_bits_count(gb) >= gb->size_in_bits) {
  2062. if (gb->size_in_bits == 8 &&
  2063. (ctx->divx_version >= 0 || ctx->xvid_build >= 0)) {
  2064. av_log(s->avctx, AV_LOG_WARNING, "frame skip %d\n", gb->size_in_bits);
  2065. return FRAME_SKIPPED; // divx bug
  2066. } else
  2067. return -1; // end of stream
  2068. }
  2069. /* use the bits after the test */
  2070. v = get_bits(gb, 8);
  2071. startcode = ((startcode << 8) | v) & 0xffffffff;
  2072. if ((startcode & 0xFFFFFF00) != 0x100)
  2073. continue; // no startcode
  2074. if (s->avctx->debug & FF_DEBUG_STARTCODE) {
  2075. av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode);
  2076. if (startcode <= 0x11F)
  2077. av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start");
  2078. else if (startcode <= 0x12F)
  2079. av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start");
  2080. else if (startcode <= 0x13F)
  2081. av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
  2082. else if (startcode <= 0x15F)
  2083. av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start");
  2084. else if (startcode <= 0x1AF)
  2085. av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
  2086. else if (startcode == 0x1B0)
  2087. av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start");
  2088. else if (startcode == 0x1B1)
  2089. av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End");
  2090. else if (startcode == 0x1B2)
  2091. av_log(s->avctx, AV_LOG_DEBUG, "User Data");
  2092. else if (startcode == 0x1B3)
  2093. av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start");
  2094. else if (startcode == 0x1B4)
  2095. av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error");
  2096. else if (startcode == 0x1B5)
  2097. av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start");
  2098. else if (startcode == 0x1B6)
  2099. av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start");
  2100. else if (startcode == 0x1B7)
  2101. av_log(s->avctx, AV_LOG_DEBUG, "slice start");
  2102. else if (startcode == 0x1B8)
  2103. av_log(s->avctx, AV_LOG_DEBUG, "extension start");
  2104. else if (startcode == 0x1B9)
  2105. av_log(s->avctx, AV_LOG_DEBUG, "fgs start");
  2106. else if (startcode == 0x1BA)
  2107. av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start");
  2108. else if (startcode == 0x1BB)
  2109. av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start");
  2110. else if (startcode == 0x1BC)
  2111. av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start");
  2112. else if (startcode == 0x1BD)
  2113. av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start");
  2114. else if (startcode == 0x1BE)
  2115. av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start");
  2116. else if (startcode == 0x1BF)
  2117. av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start");
  2118. else if (startcode == 0x1C0)
  2119. av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start");
  2120. else if (startcode == 0x1C1)
  2121. av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start");
  2122. else if (startcode == 0x1C2)
  2123. av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start");
  2124. else if (startcode == 0x1C3)
  2125. av_log(s->avctx, AV_LOG_DEBUG, "stuffing start");
  2126. else if (startcode <= 0x1C5)
  2127. av_log(s->avctx, AV_LOG_DEBUG, "reserved");
  2128. else if (startcode <= 0x1FF)
  2129. av_log(s->avctx, AV_LOG_DEBUG, "System start");
  2130. av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb));
  2131. }
  2132. if (startcode >= 0x120 && startcode <= 0x12F) {
  2133. if (decode_vol_header(ctx, gb) < 0)
  2134. return -1;
  2135. } else if (startcode == USER_DATA_STARTCODE) {
  2136. decode_user_data(ctx, gb);
  2137. } else if (startcode == GOP_STARTCODE) {
  2138. mpeg4_decode_gop_header(s, gb);
  2139. } else if (startcode == VOS_STARTCODE) {
  2140. mpeg4_decode_profile_level(s, gb);
  2141. } else if (startcode == VOP_STARTCODE) {
  2142. break;
  2143. }
  2144. align_get_bits(gb);
  2145. startcode = 0xff;
  2146. }
  2147. end:
  2148. if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
  2149. s->low_delay = 1;
  2150. s->avctx->has_b_frames = !s->low_delay;
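/* Choose encoder-specific bug workarounds from the container fourcc and the
 * encoder version/build detected in the user data. */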
  2151. if (s->workaround_bugs & FF_BUG_AUTODETECT) {
  2152. if (s->codec_tag == AV_RL32("XVIX"))
  2153. s->workaround_bugs |= FF_BUG_XVID_ILACE;
  2154. if (s->codec_tag == AV_RL32("UMP4"))
  2155. s->workaround_bugs |= FF_BUG_UMP4;
  2156. if (ctx->divx_version >= 500 && ctx->divx_build < 1814)
  2157. s->workaround_bugs |= FF_BUG_QPEL_CHROMA;
  2158. if (ctx->divx_version > 502 && ctx->divx_build < 1814)
  2159. s->workaround_bugs |= FF_BUG_QPEL_CHROMA2;
  2160. if (ctx->xvid_build <= 3U)
  2161. s->padding_bug_score = 256 * 256 * 256 * 64;
  2162. if (ctx->xvid_build <= 1U)
  2163. s->workaround_bugs |= FF_BUG_QPEL_CHROMA;
  2164. if (ctx->xvid_build <= 12U)
  2165. s->workaround_bugs |= FF_BUG_EDGE;
  2166. if (ctx->xvid_build <= 32U)
  2167. s->workaround_bugs |= FF_BUG_DC_CLIP;
  2168. if (ctx->lavc_build < 4653U)
  2169. s->workaround_bugs |= FF_BUG_STD_QPEL;
  2170. if (ctx->lavc_build < 4655U)
  2171. s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;
  2172. if (ctx->lavc_build < 4670U)
  2173. s->workaround_bugs |= FF_BUG_EDGE;
  2174. if (ctx->lavc_build <= 4712U)
  2175. s->workaround_bugs |= FF_BUG_DC_CLIP;
  2176. if (ctx->divx_version >= 0)
  2177. s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;
  2178. if (ctx->divx_version == 501 && ctx->divx_build == 20020416)
  2179. s->padding_bug_score = 256 * 256 * 256 * 64;
  2180. if (ctx->divx_version < 500U)
  2181. s->workaround_bugs |= FF_BUG_EDGE;
  2182. if (ctx->divx_version >= 0)
  2183. s->workaround_bugs |= FF_BUG_HPEL_CHROMA;
  2184. }
  2185. if (s->avctx->debug & FF_DEBUG_BUGS)
  2186. av_log(s->avctx, AV_LOG_DEBUG,
  2187. "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n",
  2188. s->workaround_bugs, ctx->lavc_build, ctx->xvid_build,
  2189. ctx->divx_version, ctx->divx_build, s->divx_packed ? "p" : "");
  2190. return decode_vop_header(ctx, gb);
  2191. }
  2192. int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
  2193. {
  2194. Mpeg4DecContext *ctx = avctx->priv_data;
  2195. MpegEncContext *s = &ctx->m;
  2196. /* divx 5.01+ bitstream reorder stuff */
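/* In packed-bitstream files a reference VOP and the following B-VOP can share
 * one chunk.  If another VOP startcode (00 00 01 B6) follows the VOP just
 * decoded, save the remaining bytes so they can be decoded on the next call. */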
  2197. if (s->divx_packed) {
  2198. int current_pos = get_bits_count(&s->gb) >> 3;
  2199. int startcode_found = 0;
  2200. if (buf_size - current_pos > 5) {
  2201. int i;
  2202. for (i = current_pos; i < buf_size - 3; i++)
  2203. if (buf[i] == 0 &&
  2204. buf[i + 1] == 0 &&
  2205. buf[i + 2] == 1 &&
  2206. buf[i + 3] == 0xB6) {
  2207. startcode_found = 1;
  2208. break;
  2209. }
  2210. }
  2211. if (s->gb.buffer == s->bitstream_buffer && buf_size > 7 &&
  2212. ctx->xvid_build >= 0) { // xvid style
  2213. startcode_found = 1;
  2214. current_pos = 0;
  2215. }
  2216. if (startcode_found) {
  2217. av_fast_malloc(&s->bitstream_buffer,
  2218. &s->allocated_bitstream_buffer_size,
  2219. buf_size - current_pos +
  2220. AV_INPUT_BUFFER_PADDING_SIZE);
  2221. if (!s->bitstream_buffer)
  2222. return AVERROR(ENOMEM);
  2223. memcpy(s->bitstream_buffer, buf + current_pos,
  2224. buf_size - current_pos);
  2225. s->bitstream_buffer_size = buf_size - current_pos;
  2226. }
  2227. }
  2228. return 0;
  2229. }
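/* Copy the VOL-level state needed by frame-threaded decoding that the generic
 * ff_mpeg_update_thread_context() does not transfer. */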
  2230. static int mpeg4_update_thread_context(AVCodecContext *dst,
  2231. const AVCodecContext *src)
  2232. {
  2233. Mpeg4DecContext *s = dst->priv_data;
  2234. const Mpeg4DecContext *s1 = src->priv_data;
  2235. int init = s->m.context_initialized;
  2236. int ret = ff_mpeg_update_thread_context(dst, src);
  2237. if (ret < 0)
  2238. return ret;
  2239. if (CONFIG_MPEG4_DECODER && !init && s1->xvid_build >= 0)
  2240. ff_xvid_idct_init(&s->m.idsp, dst);
  2241. s->shape = s1->shape;
  2242. s->time_increment_bits = s1->time_increment_bits;
  2243. s->xvid_build = s1->xvid_build;
  2244. return 0;
  2245. }
  2246. static av_cold int decode_init(AVCodecContext *avctx)
  2247. {
  2248. Mpeg4DecContext *ctx = avctx->priv_data;
  2249. MpegEncContext *s = &ctx->m;
  2250. int ret;
  2251. static int done = 0;
  2252. ctx->divx_version =
  2253. ctx->divx_build =
  2254. ctx->xvid_build =
  2255. ctx->lavc_build = -1;
  2256. if ((ret = ff_h263_decode_init(avctx)) < 0)
  2257. return ret;
  2258. if (!done) {
  2259. done = 1;
  2260. ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
  2261. ff_rl_init(&ff_rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
  2262. ff_rl_init(&ff_rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
  2263. INIT_VLC_RL(ff_mpeg4_rl_intra, 554);
  2264. INIT_VLC_RL(ff_rvlc_rl_inter, 1072);
  2265. INIT_VLC_RL(ff_rvlc_rl_intra, 1072);
  2266. INIT_VLC_STATIC(&dc_lum, DC_VLC_BITS, 10 /* 13 */,
  2267. &ff_mpeg4_DCtab_lum[0][1], 2, 1,
  2268. &ff_mpeg4_DCtab_lum[0][0], 2, 1, 512);
  2269. INIT_VLC_STATIC(&dc_chrom, DC_VLC_BITS, 10 /* 13 */,
  2270. &ff_mpeg4_DCtab_chrom[0][1], 2, 1,
  2271. &ff_mpeg4_DCtab_chrom[0][0], 2, 1, 512);
  2272. INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15,
  2273. &ff_sprite_trajectory_tab[0][1], 4, 2,
  2274. &ff_sprite_trajectory_tab[0][0], 4, 2, 128);
  2275. INIT_VLC_STATIC(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4,
  2276. &ff_mb_type_b_tab[0][1], 2, 1,
  2277. &ff_mb_type_b_tab[0][0], 2, 1, 16);
  2278. }
  2279. s->h263_pred = 1;
  2280. s->low_delay = 0; /* default, might be overridden in the vol header during header parsing */
  2281. s->decode_mb = mpeg4_decode_mb;
  2282. ctx->time_increment_bits = 4; /* default value for broken headers */
  2283. avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
  2284. avctx->internal->allocate_progress = 1;
  2285. return 0;
  2286. }
  2287. AVCodec ff_mpeg4_decoder = {
  2288. .name = "mpeg4",
  2289. .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
  2290. .type = AVMEDIA_TYPE_VIDEO,
  2291. .id = AV_CODEC_ID_MPEG4,
  2292. .priv_data_size = sizeof(Mpeg4DecContext),
  2293. .init = decode_init,
  2294. .close = ff_h263_decode_end,
  2295. .decode = ff_h263_decode_frame,
  2296. .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
  2297. AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
  2298. AV_CODEC_CAP_FRAME_THREADS,
  2299. .flush = ff_mpeg_flush,
  2300. .pix_fmts = ff_h263_hwaccel_pixfmt_list_420,
  2301. .profiles = NULL_IF_CONFIG_SMALL(ff_mpeg4_video_profiles),
  2302. .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context),
  2303. };