  1. /*
  2. * MPEG4 decoder.
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #define UNCHECKED_BITSTREAM_READER 1
  23. #include "libavutil/opt.h"
  24. #include "error_resilience.h"
  25. #include "idctdsp.h"
  26. #include "internal.h"
  27. #include "mpegutils.h"
  28. #include "mpegvideo.h"
  29. #include "mpeg4video.h"
  30. #include "h263.h"
  31. #include "thread.h"
  32. /* The defines below define the number of bits that are read at once for
  33. * reading vlc values. Changing these may improve speed and data cache needs;
  34. * be aware though that decreasing them may require the number of stages that is
  35. * passed to get_vlc* to be increased. */
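/* For example, with the values below the sprite trajectory VLC is read with up
 * to 3 stages of SPRITE_TRAJ_VLC_BITS bits each, while the DC and B-frame
 * MB-type VLCs use a single stage (see the corresponding get_vlc2() calls). */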
  36. #define SPRITE_TRAJ_VLC_BITS 6
  37. #define DC_VLC_BITS 9
  38. #define MB_TYPE_B_VLC_BITS 4
  39. static VLC dc_lum, dc_chrom;
  40. static VLC sprite_trajectory;
  41. static VLC mb_type_b_vlc;
  42. static const int mb_type_b_map[4] = {
  43. MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
  44. MB_TYPE_L0L1 | MB_TYPE_16x16,
  45. MB_TYPE_L1 | MB_TYPE_16x16,
  46. MB_TYPE_L0 | MB_TYPE_16x16,
  47. };
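/* Added note: the four entries above correspond, in order, to direct,
 * bidirectional (L0+L1), backward-only (L1) and forward-only (L0) prediction
 * for B-frame macroblocks. */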
  48. /**
  49. * Predict the ac.
  50. * @param n block index (0-3 are luma, 4-5 are chroma)
  51. * @param dir the ac prediction direction
  52. */
  53. void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir)
  54. {
  55. int i;
  56. int16_t *ac_val, *ac_val1;
  57. int8_t *const qscale_table = s->current_picture.qscale_table;
  58. /* find prediction */
  59. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  60. ac_val1 = ac_val;
  61. if (s->ac_pred) {
  62. if (dir == 0) {
  63. const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
  64. /* left prediction */
  65. ac_val -= 16;
  66. if (s->mb_x == 0 || s->qscale == qscale_table[xy] ||
  67. n == 1 || n == 3) {
  68. /* same qscale */
  69. for (i = 1; i < 8; i++)
  70. block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
  71. } else {
  72. /* different qscale, we must rescale */
  73. for (i = 1; i < 8; i++)
  74. block[s->idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
  75. }
  76. } else {
  77. const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
  78. /* top prediction */
  79. ac_val -= 16 * s->block_wrap[n];
  80. if (s->mb_y == 0 || s->qscale == qscale_table[xy] ||
  81. n == 2 || n == 3) {
  82. /* same qscale */
  83. for (i = 1; i < 8; i++)
  84. block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
  85. } else {
  86. /* different qscale, we must rescale */
  87. for (i = 1; i < 8; i++)
  88. block[s->idsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
  89. }
  90. }
  91. }
  92. /* left copy */
  93. for (i = 1; i < 8; i++)
  94. ac_val1[i] = block[s->idsp.idct_permutation[i << 3]];
  95. /* top copy */
  96. for (i = 1; i < 8; i++)
  97. ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
  98. }
  99. /**
  100. * Check whether what follows in the bitstream is a resync marker or the end of the frame.
  101. * @return 0 if not, otherwise the first macroblock number of the next video packet
  *         (s->mb_num at the end of the VOP, or -1 if the packet header looks damaged)
  102. */
  103. static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)
  104. {
  105. MpegEncContext *s = &ctx->m;
  106. int bits_count = get_bits_count(&s->gb);
  107. int v = show_bits(&s->gb, 16);
  108. if (s->workaround_bugs & FF_BUG_NO_PADDING && !ctx->resync_marker)
  109. return 0;
  110. while (v <= 0xFF) {
  111. if (s->pict_type == AV_PICTURE_TYPE_B ||
  112. (v >> (8 - s->pict_type) != 1) || s->partitioned_frame)
  113. break;
  114. skip_bits(&s->gb, 8 + s->pict_type);
  115. bits_count += 8 + s->pict_type;
  116. v = show_bits(&s->gb, 16);
  117. }
  118. if (bits_count + 8 >= s->gb.size_in_bits) {
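/* Near the end of the buffer (at most 8 bits left): check for the bit-stuffing
 * pattern (a zero bit followed by ones up to the next byte boundary) that
 * marks the end of the VOP. */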
  119. v >>= 8;
  120. v |= 0x7F >> (7 - (bits_count & 7));
  121. if (v == 0x7F)
  122. return s->mb_num;
  123. } else {
  124. if (v == ff_mpeg4_resync_prefix[bits_count & 7]) {
  125. int len, mb_num;
  126. int mb_num_bits = av_log2(s->mb_num - 1) + 1;
  127. GetBitContext gb = s->gb;
  128. skip_bits(&s->gb, 1);
  129. align_get_bits(&s->gb);
  130. for (len = 0; len < 32; len++)
  131. if (get_bits1(&s->gb))
  132. break;
  133. mb_num = get_bits(&s->gb, mb_num_bits);
  134. if (!mb_num || mb_num > s->mb_num || get_bits_count(&s->gb)+6 > s->gb.size_in_bits)
  135. mb_num = -1;
  136. s->gb = gb;
  137. if (len >= ff_mpeg4_get_video_packet_prefix_length(s))
  138. return mb_num;
  139. }
  140. }
  141. return 0;
  142. }
  143. static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb)
  144. {
  145. MpegEncContext *s = &ctx->m;
  146. int a = 2 << s->sprite_warping_accuracy;
  147. int rho = 3 - s->sprite_warping_accuracy;
  148. int r = 16 / a;
  149. int alpha = 0;
  150. int beta = 0;
  151. int w = s->width;
  152. int h = s->height;
  153. int min_ab, i, w2, h2, w3, h3;
  154. int sprite_ref[4][2];
  155. int virtual_ref[2][2];
  156. // only true for rectangle shapes
  157. const int vop_ref[4][2] = { { 0, 0 }, { s->width, 0 },
  158. { 0, s->height }, { s->width, s->height } };
  159. int d[4][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };
  160. if (w <= 0 || h <= 0)
  161. return AVERROR_INVALIDDATA;
  162. for (i = 0; i < ctx->num_sprite_warping_points; i++) {
  163. int length;
  164. int x = 0, y = 0;
  165. length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
  166. if (length)
  167. x = get_xbits(gb, length);
  168. if (!(ctx->divx_version == 500 && ctx->divx_build == 413))
  169. skip_bits1(gb); /* marker bit */
  170. length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
  171. if (length)
  172. y = get_xbits(gb, length);
  173. skip_bits1(gb); /* marker bit */
  174. ctx->sprite_traj[i][0] = d[i][0] = x;
  175. ctx->sprite_traj[i][1] = d[i][1] = y;
  176. }
  177. for (; i < 4; i++)
  178. ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0;
  179. while ((1 << alpha) < w)
  180. alpha++;
  181. while ((1 << beta) < h)
  182. beta++; /* typo in the mpeg4 std for the definition of w' and h' */
  183. w2 = 1 << alpha;
  184. h2 = 1 << beta;
  185. // Note, the 4th point isn't used for GMC
  186. if (ctx->divx_version == 500 && ctx->divx_build == 413) {
  187. sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0];
  188. sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1];
  189. sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0];
  190. sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1];
  191. sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0];
  192. sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1];
  193. } else {
  194. sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]);
  195. sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]);
  196. sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]);
  197. sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]);
  198. sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]);
  199. sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]);
  200. }
  201. /* sprite_ref[3][0] = (a >> 1) * (2 * vop_ref[3][0] + d[0][0] + d[1][0] + d[2][0] + d[3][0]);
  202. * sprite_ref[3][1] = (a >> 1) * (2 * vop_ref[3][1] + d[0][1] + d[1][1] + d[2][1] + d[3][1]); */
  203. /* this is mostly identical to the mpeg4 std (and is totally unreadable
  204. * because of that...). Perhaps it should be reordered to be more readable.
  205. * The idea behind this virtual_ref mess is to be able to use shifts later
  206. * per pixel instead of divides so the distance between points is converted
  207. * from w&h based to w2&h2 based which are of the 2^x form. */
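/* Illustrative example (not from the spec): for w = 720 the loop above gives
 * alpha = 10 and w2 = 1024, so distances expressed relative to w2 can later be
 * scaled with a right shift by alpha bits instead of a division by 720. */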
  208. virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) +
  209. ROUNDED_DIV(((w - w2) *
  210. (r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
  211. w2 * (r * sprite_ref[1][0] - 16 * vop_ref[1][0])), w);
  212. virtual_ref[0][1] = 16 * vop_ref[0][1] +
  213. ROUNDED_DIV(((w - w2) *
  214. (r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
  215. w2 * (r * sprite_ref[1][1] - 16 * vop_ref[1][1])), w);
  216. virtual_ref[1][0] = 16 * vop_ref[0][0] +
  217. ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
  218. h2 * (r * sprite_ref[2][0] - 16 * vop_ref[2][0])), h);
  219. virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) +
  220. ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
  221. h2 * (r * sprite_ref[2][1] - 16 * vop_ref[2][1])), h);
  222. switch (ctx->num_sprite_warping_points) {
  223. case 0:
  224. s->sprite_offset[0][0] =
  225. s->sprite_offset[0][1] =
  226. s->sprite_offset[1][0] =
  227. s->sprite_offset[1][1] = 0;
  228. s->sprite_delta[0][0] = a;
  229. s->sprite_delta[0][1] =
  230. s->sprite_delta[1][0] = 0;
  231. s->sprite_delta[1][1] = a;
  232. ctx->sprite_shift[0] =
  233. ctx->sprite_shift[1] = 0;
  234. break;
  235. case 1: // GMC only
  236. s->sprite_offset[0][0] = sprite_ref[0][0] - a * vop_ref[0][0];
  237. s->sprite_offset[0][1] = sprite_ref[0][1] - a * vop_ref[0][1];
  238. s->sprite_offset[1][0] = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) -
  239. a * (vop_ref[0][0] / 2);
  240. s->sprite_offset[1][1] = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) -
  241. a * (vop_ref[0][1] / 2);
  242. s->sprite_delta[0][0] = a;
  243. s->sprite_delta[0][1] =
  244. s->sprite_delta[1][0] = 0;
  245. s->sprite_delta[1][1] = a;
  246. ctx->sprite_shift[0] =
  247. ctx->sprite_shift[1] = 0;
  248. break;
  249. case 2:
  250. s->sprite_offset[0][0] = (sprite_ref[0][0] << (alpha + rho)) +
  251. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  252. (-vop_ref[0][0]) +
  253. (r * sprite_ref[0][1] - virtual_ref[0][1]) *
  254. (-vop_ref[0][1]) + (1 << (alpha + rho - 1));
  255. s->sprite_offset[0][1] = (sprite_ref[0][1] << (alpha + rho)) +
  256. (-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  257. (-vop_ref[0][0]) +
  258. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  259. (-vop_ref[0][1]) + (1 << (alpha + rho - 1));
  260. s->sprite_offset[1][0] = ((-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  261. (-2 * vop_ref[0][0] + 1) +
  262. (r * sprite_ref[0][1] - virtual_ref[0][1]) *
  263. (-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
  264. sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1)));
  265. s->sprite_offset[1][1] = ((-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  266. (-2 * vop_ref[0][0] + 1) +
  267. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  268. (-2 * vop_ref[0][1] + 1) + 2 * w2 * r *
  269. sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1)));
  270. s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
  271. s->sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]);
  272. s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]);
  273. s->sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
  274. ctx->sprite_shift[0] = alpha + rho;
  275. ctx->sprite_shift[1] = alpha + rho + 2;
  276. break;
  277. case 3:
  278. min_ab = FFMIN(alpha, beta);
  279. w3 = w2 >> min_ab;
  280. h3 = h2 >> min_ab;
  281. s->sprite_offset[0][0] = (sprite_ref[0][0] << (alpha + beta + rho - min_ab)) +
  282. (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  283. h3 * (-vop_ref[0][0]) +
  284. (-r * sprite_ref[0][0] + virtual_ref[1][0]) *
  285. w3 * (-vop_ref[0][1]) +
  286. (1 << (alpha + beta + rho - min_ab - 1));
  287. s->sprite_offset[0][1] = (sprite_ref[0][1] << (alpha + beta + rho - min_ab)) +
  288. (-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  289. h3 * (-vop_ref[0][0]) +
  290. (-r * sprite_ref[0][1] + virtual_ref[1][1]) *
  291. w3 * (-vop_ref[0][1]) +
  292. (1 << (alpha + beta + rho - min_ab - 1));
  293. s->sprite_offset[1][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]) *
  294. h3 * (-2 * vop_ref[0][0] + 1) +
  295. (-r * sprite_ref[0][0] + virtual_ref[1][0]) *
  296. w3 * (-2 * vop_ref[0][1] + 1) + 2 * w2 * h3 *
  297. r * sprite_ref[0][0] - 16 * w2 * h3 +
  298. (1 << (alpha + beta + rho - min_ab + 1));
  299. s->sprite_offset[1][1] = (-r * sprite_ref[0][1] + virtual_ref[0][1]) *
  300. h3 * (-2 * vop_ref[0][0] + 1) +
  301. (-r * sprite_ref[0][1] + virtual_ref[1][1]) *
  302. w3 * (-2 * vop_ref[0][1] + 1) + 2 * w2 * h3 *
  303. r * sprite_ref[0][1] - 16 * w2 * h3 +
  304. (1 << (alpha + beta + rho - min_ab + 1));
  305. s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3;
  306. s->sprite_delta[0][1] = (-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3;
  307. s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3;
  308. s->sprite_delta[1][1] = (-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3;
  309. ctx->sprite_shift[0] = alpha + beta + rho - min_ab;
  310. ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2;
  311. break;
  312. }
  313. /* try to simplify the situation: a purely translational warp (the delta matrix is a scaled identity, a << sprite_shift) can be handled as a single warping point */
  314. if (s->sprite_delta[0][0] == a << ctx->sprite_shift[0] &&
  315. s->sprite_delta[0][1] == 0 &&
  316. s->sprite_delta[1][0] == 0 &&
  317. s->sprite_delta[1][1] == a << ctx->sprite_shift[0]) {
  318. s->sprite_offset[0][0] >>= ctx->sprite_shift[0];
  319. s->sprite_offset[0][1] >>= ctx->sprite_shift[0];
  320. s->sprite_offset[1][0] >>= ctx->sprite_shift[1];
  321. s->sprite_offset[1][1] >>= ctx->sprite_shift[1];
  322. s->sprite_delta[0][0] = a;
  323. s->sprite_delta[0][1] = 0;
  324. s->sprite_delta[1][0] = 0;
  325. s->sprite_delta[1][1] = a;
  326. ctx->sprite_shift[0] = 0;
  327. ctx->sprite_shift[1] = 0;
  328. s->real_sprite_warping_points = 1;
  329. } else {
  330. int shift_y = 16 - ctx->sprite_shift[0];
  331. int shift_c = 16 - ctx->sprite_shift[1];
  332. for (i = 0; i < 2; i++) {
  333. s->sprite_offset[0][i] <<= shift_y;
  334. s->sprite_offset[1][i] <<= shift_c;
  335. s->sprite_delta[0][i] <<= shift_y;
  336. s->sprite_delta[1][i] <<= shift_y;
  337. ctx->sprite_shift[i] = 16;
  338. }
  339. s->real_sprite_warping_points = ctx->num_sprite_warping_points;
  340. }
  341. return 0;
  342. }
  343. static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb) {
  344. int len = FFMIN(ctx->time_increment_bits + 3, 15);
  345. get_bits(gb, len);
  346. if (get_bits1(gb))
  347. get_bits(gb, len);
  348. check_marker(gb, "after new_pred");
  349. return 0;
  350. }
  351. /**
  352. * Decode the next video packet.
  353. * @return <0 if something went wrong
  354. */
  355. int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
  356. {
  357. MpegEncContext *s = &ctx->m;
  358. int mb_num_bits = av_log2(s->mb_num - 1) + 1;
  359. int header_extension = 0, mb_num, len;
  360. /* is there enough space left for a video packet + header */
  361. if (get_bits_count(&s->gb) > s->gb.size_in_bits - 20)
  362. return -1;
  363. for (len = 0; len < 32; len++)
  364. if (get_bits1(&s->gb))
  365. break;
  366. if (len != ff_mpeg4_get_video_packet_prefix_length(s)) {
  367. av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n");
  368. return -1;
  369. }
  370. if (ctx->shape != RECT_SHAPE) {
  371. header_extension = get_bits1(&s->gb);
  372. // FIXME more stuff here
  373. }
  374. mb_num = get_bits(&s->gb, mb_num_bits);
  375. if (mb_num >= s->mb_num) {
  376. av_log(s->avctx, AV_LOG_ERROR,
  377. "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);
  378. return -1;
  379. }
  380. s->mb_x = mb_num % s->mb_width;
  381. s->mb_y = mb_num / s->mb_width;
  382. if (ctx->shape != BIN_ONLY_SHAPE) {
  383. int qscale = get_bits(&s->gb, s->quant_precision);
  384. if (qscale)
  385. s->chroma_qscale = s->qscale = qscale;
  386. }
  387. if (ctx->shape == RECT_SHAPE)
  388. header_extension = get_bits1(&s->gb);
  389. if (header_extension) {
  390. int time_incr = 0;
  391. while (get_bits1(&s->gb) != 0)
  392. time_incr++;
  393. check_marker(&s->gb, "before time_increment in video packet header");
  394. skip_bits(&s->gb, ctx->time_increment_bits); /* time_increment */
  395. check_marker(&s->gb, "before vop_coding_type in video packet header");
  396. skip_bits(&s->gb, 2); /* vop coding type */
  397. // FIXME not rect stuff here
  398. if (ctx->shape != BIN_ONLY_SHAPE) {
  399. skip_bits(&s->gb, 3); /* intra dc vlc threshold */
  400. // FIXME don't just ignore everything
  401. if (s->pict_type == AV_PICTURE_TYPE_S &&
  402. ctx->vol_sprite_usage == GMC_SPRITE) {
  403. if (mpeg4_decode_sprite_trajectory(ctx, &s->gb) < 0)
  404. return AVERROR_INVALIDDATA;
  405. av_log(s->avctx, AV_LOG_ERROR, "untested\n");
  406. }
  407. // FIXME reduced res stuff here
  408. if (s->pict_type != AV_PICTURE_TYPE_I) {
  409. int f_code = get_bits(&s->gb, 3); /* fcode_for */
  410. if (f_code == 0)
  411. av_log(s->avctx, AV_LOG_ERROR,
  412. "Error, video packet header damaged (f_code=0)\n");
  413. }
  414. if (s->pict_type == AV_PICTURE_TYPE_B) {
  415. int b_code = get_bits(&s->gb, 3);
  416. if (b_code == 0)
  417. av_log(s->avctx, AV_LOG_ERROR,
  418. "Error, video packet header damaged (b_code=0)\n");
  419. }
  420. }
  421. }
  422. if (ctx->new_pred)
  423. decode_new_pred(ctx, &s->gb);
  424. return 0;
  425. }
  426. /**
  427. * Get the average motion vector for a GMC MB.
  428. * @param n either 0 for the x component or 1 for y
  429. * @return the average MV for a GMC MB
  430. */
  431. static inline int get_amv(Mpeg4DecContext *ctx, int n)
  432. {
  433. MpegEncContext *s = &ctx->m;
  434. int x, y, mb_v, sum, dx, dy, shift;
  435. int len = 1 << (s->f_code + 4);
  436. const int a = s->sprite_warping_accuracy;
  437. if (s->workaround_bugs & FF_BUG_AMV)
  438. len >>= s->quarter_sample;
  439. if (s->real_sprite_warping_points == 1) {
  440. if (ctx->divx_version == 500 && ctx->divx_build == 413)
  441. sum = s->sprite_offset[0][n] / (1 << (a - s->quarter_sample));
  442. else
  443. sum = RSHIFT(s->sprite_offset[0][n] << s->quarter_sample, a);
  444. } else {
  445. dx = s->sprite_delta[n][0];
  446. dy = s->sprite_delta[n][1];
  447. shift = ctx->sprite_shift[0];
  448. if (n)
  449. dy -= 1 << (shift + a + 1);
  450. else
  451. dx -= 1 << (shift + a + 1);
  452. mb_v = s->sprite_offset[0][n] + dx * s->mb_x * 16 + dy * s->mb_y * 16;
  453. sum = 0;
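/* Accumulate the warped motion over all 16x16 = 256 pixels of the MB; the
 * RSHIFT below averages it (the extra 8 in the shift accounts for the 2^8
 * pixels summed). */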
  454. for (y = 0; y < 16; y++) {
  455. int v;
  456. v = mb_v + dy * y;
  457. // FIXME optimize
  458. for (x = 0; x < 16; x++) {
  459. sum += v >> shift;
  460. v += dx;
  461. }
  462. }
  463. sum = RSHIFT(sum, a + 8 - s->quarter_sample);
  464. }
  465. if (sum < -len)
  466. sum = -len;
  467. else if (sum >= len)
  468. sum = len - 1;
  469. return sum;
  470. }
  471. /**
  472. * Decode the dc value.
  473. * @param n block index (0-3 are luma, 4-5 are chroma)
  474. * @param dir_ptr the prediction direction will be stored here
  475. * @return the quantized dc
  476. */
  477. static inline int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr)
  478. {
  479. int level, code;
  480. if (n < 4)
  481. code = get_vlc2(&s->gb, dc_lum.table, DC_VLC_BITS, 1);
  482. else
  483. code = get_vlc2(&s->gb, dc_chrom.table, DC_VLC_BITS, 1);
  484. if (code < 0 || code > 9 /* && s->nbit < 9 */) {
  485. av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
  486. return -1;
  487. }
  488. if (code == 0) {
  489. level = 0;
  490. } else {
  491. if (IS_3IV1) {
  492. if (code == 1)
  493. level = 2 * get_bits1(&s->gb) - 1;
  494. else {
  495. if (get_bits1(&s->gb))
  496. level = get_bits(&s->gb, code - 1) + (1 << (code - 1));
  497. else
  498. level = -get_bits(&s->gb, code - 1) - (1 << (code - 1));
  499. }
  500. } else {
  501. level = get_xbits(&s->gb, code);
  502. }
  503. if (code > 8) {
  504. if (get_bits1(&s->gb) == 0) { /* marker */
  505. if (s->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)) {
  506. av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n");
  507. return -1;
  508. }
  509. }
  510. }
  511. }
  512. return ff_mpeg4_pred_dc(s, n, level, dir_ptr, 0);
  513. }
  514. /**
  515. * Decode first partition.
  516. * @return number of MBs decoded or <0 if an error occurred
  517. */
  518. static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
  519. {
  520. MpegEncContext *s = &ctx->m;
  521. int mb_num = 0;
  522. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
  523. /* decode first partition */
  524. s->first_slice_line = 1;
  525. for (; s->mb_y < s->mb_height; s->mb_y++) {
  526. ff_init_block_index(s);
  527. for (; s->mb_x < s->mb_width; s->mb_x++) {
  528. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  529. int cbpc;
  530. int dir = 0;
  531. mb_num++;
  532. ff_update_block_index(s);
  533. if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
  534. s->first_slice_line = 0;
  535. if (s->pict_type == AV_PICTURE_TYPE_I) {
  536. int i;
  537. do {
  538. if (show_bits_long(&s->gb, 19) == DC_MARKER)
  539. return mb_num - 1;
  540. cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
  541. if (cbpc < 0) {
  542. av_log(s->avctx, AV_LOG_ERROR,
  543. "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
  544. return -1;
  545. }
  546. } while (cbpc == 8);
  547. s->cbp_table[xy] = cbpc & 3;
  548. s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
  549. s->mb_intra = 1;
  550. if (cbpc & 4)
  551. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  552. s->current_picture.qscale_table[xy] = s->qscale;
  553. s->mbintra_table[xy] = 1;
  554. for (i = 0; i < 6; i++) {
  555. int dc_pred_dir;
  556. int dc = mpeg4_decode_dc(s, i, &dc_pred_dir);
  557. if (dc < 0) {
  558. av_log(s->avctx, AV_LOG_ERROR,
  559. "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
  560. return -1;
  561. }
  562. dir <<= 1;
  563. if (dc_pred_dir)
  564. dir |= 1;
  565. }
  566. s->pred_dir_table[xy] = dir;
  567. } else { /* P/S_TYPE */
  568. int mx, my, pred_x, pred_y, bits;
  569. int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]];
  570. const int stride = s->b8_stride * 2;
  571. try_again:
  572. bits = show_bits(&s->gb, 17);
  573. if (bits == MOTION_MARKER)
  574. return mb_num - 1;
  575. skip_bits1(&s->gb);
  576. if (bits & 0x10000) {
  577. /* skip mb */
  578. if (s->pict_type == AV_PICTURE_TYPE_S &&
  579. ctx->vol_sprite_usage == GMC_SPRITE) {
  580. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  581. MB_TYPE_16x16 |
  582. MB_TYPE_GMC |
  583. MB_TYPE_L0;
  584. mx = get_amv(ctx, 0);
  585. my = get_amv(ctx, 1);
  586. } else {
  587. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  588. MB_TYPE_16x16 |
  589. MB_TYPE_L0;
  590. mx = my = 0;
  591. }
  592. mot_val[0] =
  593. mot_val[2] =
  594. mot_val[0 + stride] =
  595. mot_val[2 + stride] = mx;
  596. mot_val[1] =
  597. mot_val[3] =
  598. mot_val[1 + stride] =
  599. mot_val[3 + stride] = my;
  600. if (s->mbintra_table[xy])
  601. ff_clean_intra_table_entries(s);
  602. continue;
  603. }
  604. cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
  605. if (cbpc < 0) {
  606. av_log(s->avctx, AV_LOG_ERROR,
  607. "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
  608. return -1;
  609. }
  610. if (cbpc == 20)
  611. goto try_again;
  612. s->cbp_table[xy] = cbpc & (8 + 3); // 8 is dquant
  613. s->mb_intra = ((cbpc & 4) != 0);
  614. if (s->mb_intra) {
  615. s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
  616. s->mbintra_table[xy] = 1;
  617. mot_val[0] =
  618. mot_val[2] =
  619. mot_val[0 + stride] =
  620. mot_val[2 + stride] = 0;
  621. mot_val[1] =
  622. mot_val[3] =
  623. mot_val[1 + stride] =
  624. mot_val[3 + stride] = 0;
  625. } else {
  626. if (s->mbintra_table[xy])
  627. ff_clean_intra_table_entries(s);
  628. if (s->pict_type == AV_PICTURE_TYPE_S &&
  629. ctx->vol_sprite_usage == GMC_SPRITE &&
  630. (cbpc & 16) == 0)
  631. s->mcsel = get_bits1(&s->gb);
  632. else
  633. s->mcsel = 0;
  634. if ((cbpc & 16) == 0) {
  635. /* 16x16 motion prediction */
  636. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  637. if (!s->mcsel) {
  638. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  639. if (mx >= 0xffff)
  640. return -1;
  641. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  642. if (my >= 0xffff)
  643. return -1;
  644. s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
  645. MB_TYPE_L0;
  646. } else {
  647. mx = get_amv(ctx, 0);
  648. my = get_amv(ctx, 1);
  649. s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
  650. MB_TYPE_GMC |
  651. MB_TYPE_L0;
  652. }
  653. mot_val[0] =
  654. mot_val[2] =
  655. mot_val[0 + stride] =
  656. mot_val[2 + stride] = mx;
  657. mot_val[1] =
  658. mot_val[3] =
  659. mot_val[1 + stride] =
  660. mot_val[3 + stride] = my;
  661. } else {
  662. int i;
  663. s->current_picture.mb_type[xy] = MB_TYPE_8x8 |
  664. MB_TYPE_L0;
  665. for (i = 0; i < 4; i++) {
  666. int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  667. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  668. if (mx >= 0xffff)
  669. return -1;
  670. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  671. if (my >= 0xffff)
  672. return -1;
  673. mot_val[0] = mx;
  674. mot_val[1] = my;
  675. }
  676. }
  677. }
  678. }
  679. }
  680. s->mb_x = 0;
  681. }
  682. return mb_num;
  683. }
  684. /**
  685. * Decode the second partition.
  686. * @return <0 if an error occurred
  687. */
  688. static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
  689. {
  690. int mb_num = 0;
  691. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
  692. s->mb_x = s->resync_mb_x;
  693. s->first_slice_line = 1;
  694. for (s->mb_y = s->resync_mb_y; mb_num < mb_count; s->mb_y++) {
  695. ff_init_block_index(s);
  696. for (; mb_num < mb_count && s->mb_x < s->mb_width; s->mb_x++) {
  697. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  698. mb_num++;
  699. ff_update_block_index(s);
  700. if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
  701. s->first_slice_line = 0;
  702. if (s->pict_type == AV_PICTURE_TYPE_I) {
  703. int ac_pred = get_bits1(&s->gb);
  704. int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  705. if (cbpy < 0) {
  706. av_log(s->avctx, AV_LOG_ERROR,
  707. "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
  708. return -1;
  709. }
  710. s->cbp_table[xy] |= cbpy << 2;
  711. s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
  712. } else { /* P || S_TYPE */
  713. if (IS_INTRA(s->current_picture.mb_type[xy])) {
  714. int i;
  715. int dir = 0;
  716. int ac_pred = get_bits1(&s->gb);
  717. int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  718. if (cbpy < 0) {
  719. av_log(s->avctx, AV_LOG_ERROR,
  720. "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
  721. return -1;
  722. }
  723. if (s->cbp_table[xy] & 8)
  724. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  725. s->current_picture.qscale_table[xy] = s->qscale;
  726. for (i = 0; i < 6; i++) {
  727. int dc_pred_dir;
  728. int dc = mpeg4_decode_dc(s, i, &dc_pred_dir);
  729. if (dc < 0) {
  730. av_log(s->avctx, AV_LOG_ERROR,
  731. "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
  732. return -1;
  733. }
  734. dir <<= 1;
  735. if (dc_pred_dir)
  736. dir |= 1;
  737. }
  738. s->cbp_table[xy] &= 3; // remove dquant
  739. s->cbp_table[xy] |= cbpy << 2;
  740. s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
  741. s->pred_dir_table[xy] = dir;
  742. } else if (IS_SKIP(s->current_picture.mb_type[xy])) {
  743. s->current_picture.qscale_table[xy] = s->qscale;
  744. s->cbp_table[xy] = 0;
  745. } else {
  746. int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  747. if (cbpy < 0) {
  748. av_log(s->avctx, AV_LOG_ERROR,
  749. "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
  750. return -1;
  751. }
  752. if (s->cbp_table[xy] & 8)
  753. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  754. s->current_picture.qscale_table[xy] = s->qscale;
  755. s->cbp_table[xy] &= 3; // remove dquant
  756. s->cbp_table[xy] |= (cbpy ^ 0xf) << 2;
  757. }
  758. }
  759. }
  760. if (mb_num >= mb_count)
  761. return 0;
  762. s->mb_x = 0;
  763. }
  764. return 0;
  765. }
  766. /**
  767. * Decode the first and second partition.
  768. * @return <0 if error (and sets error type in the error_status_table)
  769. */
  770. int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
  771. {
  772. MpegEncContext *s = &ctx->m;
  773. int mb_num;
  774. const int part_a_error = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR;
  775. const int part_a_end = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_END | ER_MV_END) : ER_MV_END;
  776. mb_num = mpeg4_decode_partition_a(ctx);
  777. if (mb_num < 0) {
  778. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  779. s->mb_x, s->mb_y, part_a_error);
  780. return -1;
  781. }
  782. if (s->resync_mb_x + s->resync_mb_y * s->mb_width + mb_num > s->mb_num) {
  783. av_log(s->avctx, AV_LOG_ERROR, "slice below monitor ...\n");
  784. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  785. s->mb_x, s->mb_y, part_a_error);
  786. return -1;
  787. }
  788. s->mb_num_left = mb_num;
  789. if (s->pict_type == AV_PICTURE_TYPE_I) {
  790. while (show_bits(&s->gb, 9) == 1)
  791. skip_bits(&s->gb, 9);
  792. if (get_bits_long(&s->gb, 19) != DC_MARKER) {
  793. av_log(s->avctx, AV_LOG_ERROR,
  794. "marker missing after first I partition at %d %d\n",
  795. s->mb_x, s->mb_y);
  796. return -1;
  797. }
  798. } else {
  799. while (show_bits(&s->gb, 10) == 1)
  800. skip_bits(&s->gb, 10);
  801. if (get_bits(&s->gb, 17) != MOTION_MARKER) {
  802. av_log(s->avctx, AV_LOG_ERROR,
  803. "marker missing after first P partition at %d %d\n",
  804. s->mb_x, s->mb_y);
  805. return -1;
  806. }
  807. }
  808. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  809. s->mb_x - 1, s->mb_y, part_a_end);
  810. if (mpeg4_decode_partition_b(s, mb_num) < 0) {
  811. if (s->pict_type == AV_PICTURE_TYPE_P)
  812. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  813. s->mb_x, s->mb_y, ER_DC_ERROR);
  814. return -1;
  815. } else {
  816. if (s->pict_type == AV_PICTURE_TYPE_P)
  817. ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
  818. s->mb_x - 1, s->mb_y, ER_DC_END);
  819. }
  820. return 0;
  821. }
  822. /**
  823. * Decode a block.
  824. * @return <0 if an error occurred
  825. */
  826. static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
  827. int n, int coded, int intra, int rvlc)
  828. {
  829. MpegEncContext *s = &ctx->m;
  830. int level, i, last, run, qmul, qadd;
  831. int av_uninit(dc_pred_dir);
  832. RLTable *rl;
  833. RL_VLC_ELEM *rl_vlc;
  834. const uint8_t *scan_table;
  835. // Note intra & rvlc should be optimized away if this is inlined
  836. if (intra) {
  837. if (ctx->use_intra_dc_vlc) {
  838. /* DC coef */
  839. if (s->partitioned_frame) {
  840. level = s->dc_val[0][s->block_index[n]];
  841. if (n < 4)
  842. level = FASTDIV((level + (s->y_dc_scale >> 1)), s->y_dc_scale);
  843. else
  844. level = FASTDIV((level + (s->c_dc_scale >> 1)), s->c_dc_scale);
  845. dc_pred_dir = (s->pred_dir_table[s->mb_x + s->mb_y * s->mb_stride] << n) & 32;
  846. } else {
  847. level = mpeg4_decode_dc(s, n, &dc_pred_dir);
  848. if (level < 0)
  849. return -1;
  850. }
  851. block[0] = level;
  852. i = 0;
  853. } else {
  854. i = -1;
  855. ff_mpeg4_pred_dc(s, n, 0, &dc_pred_dir, 0);
  856. }
  857. if (!coded)
  858. goto not_coded;
  859. if (rvlc) {
  860. rl = &ff_rvlc_rl_intra;
  861. rl_vlc = ff_rvlc_rl_intra.rl_vlc[0];
  862. } else {
  863. rl = &ff_mpeg4_rl_intra;
  864. rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0];
  865. }
  866. if (s->ac_pred) {
  867. if (dc_pred_dir == 0)
  868. scan_table = s->intra_v_scantable.permutated; /* left */
  869. else
  870. scan_table = s->intra_h_scantable.permutated; /* top */
  871. } else {
  872. scan_table = s->intra_scantable.permutated;
  873. }
  874. qmul = 1;
  875. qadd = 0;
  876. } else {
  877. i = -1;
  878. if (!coded) {
  879. s->block_last_index[n] = i;
  880. return 0;
  881. }
  882. if (rvlc)
  883. rl = &ff_rvlc_rl_inter;
  884. else
  885. rl = &ff_h263_rl_inter;
  886. scan_table = s->intra_scantable.permutated;
  887. if (s->mpeg_quant) {
  888. qmul = 1;
  889. qadd = 0;
  890. if (rvlc)
  891. rl_vlc = ff_rvlc_rl_inter.rl_vlc[0];
  892. else
  893. rl_vlc = ff_h263_rl_inter.rl_vlc[0];
  894. } else {
  895. qmul = s->qscale << 1;
  896. qadd = (s->qscale - 1) | 1;
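/* Added note: qmul/qadd implement the H.263-style inverse quantization used
 * below, where a coefficient is reconstructed as level * 2*qscale plus or
 * minus an odd offset derived from qscale. */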
  897. if (rvlc)
  898. rl_vlc = ff_rvlc_rl_inter.rl_vlc[s->qscale];
  899. else
  900. rl_vlc = ff_h263_rl_inter.rl_vlc[s->qscale];
  901. }
  902. }
  903. {
  904. OPEN_READER(re, &s->gb);
  905. for (;;) {
  906. UPDATE_CACHE(re, &s->gb);
  907. GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0);
  908. if (level == 0) {
  909. /* escape */
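/* Summary (added, based on the code below): RVLC streams code the escape as an
 * explicit last/run/level triple; otherwise escape type 1 offsets the level,
 * type 2 offsets the run, and type 3 codes last/run/level explicitly. */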
  910. if (rvlc) {
  911. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  912. av_log(s->avctx, AV_LOG_ERROR,
  913. "1. marker bit missing in rvlc esc\n");
  914. return -1;
  915. }
  916. SKIP_CACHE(re, &s->gb, 1);
  917. last = SHOW_UBITS(re, &s->gb, 1);
  918. SKIP_CACHE(re, &s->gb, 1);
  919. run = SHOW_UBITS(re, &s->gb, 6);
  920. SKIP_COUNTER(re, &s->gb, 1 + 1 + 6);
  921. UPDATE_CACHE(re, &s->gb);
  922. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  923. av_log(s->avctx, AV_LOG_ERROR,
  924. "2. marker bit missing in rvlc esc\n");
  925. return -1;
  926. }
  927. SKIP_CACHE(re, &s->gb, 1);
  928. level = SHOW_UBITS(re, &s->gb, 11);
  929. SKIP_CACHE(re, &s->gb, 11);
  930. if (SHOW_UBITS(re, &s->gb, 5) != 0x10) {
  931. av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n");
  932. return -1;
  933. }
  934. SKIP_CACHE(re, &s->gb, 5);
  935. level = level * qmul + qadd;
  936. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  937. SKIP_COUNTER(re, &s->gb, 1 + 11 + 5 + 1);
  938. i += run + 1;
  939. if (last)
  940. i += 192;
  941. } else {
  942. int cache;
  943. cache = GET_CACHE(re, &s->gb);
  944. if (IS_3IV1)
  945. cache ^= 0xC0000000;
  946. if (cache & 0x80000000) {
  947. if (cache & 0x40000000) {
  948. /* third escape */
  949. SKIP_CACHE(re, &s->gb, 2);
  950. last = SHOW_UBITS(re, &s->gb, 1);
  951. SKIP_CACHE(re, &s->gb, 1);
  952. run = SHOW_UBITS(re, &s->gb, 6);
  953. SKIP_COUNTER(re, &s->gb, 2 + 1 + 6);
  954. UPDATE_CACHE(re, &s->gb);
  955. if (IS_3IV1) {
  956. level = SHOW_SBITS(re, &s->gb, 12);
  957. LAST_SKIP_BITS(re, &s->gb, 12);
  958. } else {
  959. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  960. av_log(s->avctx, AV_LOG_ERROR,
  961. "1. marker bit missing in 3. esc\n");
  962. if (!(s->err_recognition & AV_EF_IGNORE_ERR))
  963. return -1;
  964. }
  965. SKIP_CACHE(re, &s->gb, 1);
  966. level = SHOW_SBITS(re, &s->gb, 12);
  967. SKIP_CACHE(re, &s->gb, 12);
  968. if (SHOW_UBITS(re, &s->gb, 1) == 0) {
  969. av_log(s->avctx, AV_LOG_ERROR,
  970. "2. marker bit missing in 3. esc\n");
  971. if (!(s->err_recognition & AV_EF_IGNORE_ERR))
  972. return -1;
  973. }
  974. SKIP_COUNTER(re, &s->gb, 1 + 12 + 1);
  975. }
  976. #if 0
  977. if (s->error_recognition >= FF_ER_COMPLIANT) {
  978. const int abs_level= FFABS(level);
  979. if (abs_level<=MAX_LEVEL && run<=MAX_RUN) {
  980. const int run1= run - rl->max_run[last][abs_level] - 1;
  981. if (abs_level <= rl->max_level[last][run]) {
  982. av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n");
  983. return -1;
  984. }
  985. if (s->error_recognition > FF_ER_COMPLIANT) {
  986. if (abs_level <= rl->max_level[last][run]*2) {
  987. av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n");
  988. return -1;
  989. }
  990. if (run1 >= 0 && abs_level <= rl->max_level[last][run1]) {
  991. av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n");
  992. return -1;
  993. }
  994. }
  995. }
  996. }
  997. #endif
  998. if (level > 0)
  999. level = level * qmul + qadd;
  1000. else
  1001. level = level * qmul - qadd;
  1002. if ((unsigned)(level + 2048) > 4095) {
  1003. if (s->err_recognition & (AV_EF_BITSTREAM|AV_EF_AGGRESSIVE)) {
  1004. if (level > 2560 || level < -2560) {
  1005. av_log(s->avctx, AV_LOG_ERROR,
  1006. "|level| overflow in 3. esc, qp=%d\n",
  1007. s->qscale);
  1008. return -1;
  1009. }
  1010. }
  1011. level = level < 0 ? -2048 : 2047;
  1012. }
  1013. i += run + 1;
  1014. if (last)
  1015. i += 192;
  1016. } else {
  1017. /* second escape */
  1018. SKIP_BITS(re, &s->gb, 2);
  1019. GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
  1020. i += run + rl->max_run[run >> 7][level / qmul] + 1; // FIXME opt indexing
  1021. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1022. LAST_SKIP_BITS(re, &s->gb, 1);
  1023. }
  1024. } else {
  1025. /* first escape */
  1026. SKIP_BITS(re, &s->gb, 1);
  1027. GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
  1028. i += run;
  1029. level = level + rl->max_level[run >> 7][(run - 1) & 63] * qmul; // FIXME opt indexing
  1030. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1031. LAST_SKIP_BITS(re, &s->gb, 1);
  1032. }
  1033. }
  1034. } else {
  1035. i += run;
  1036. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1037. LAST_SKIP_BITS(re, &s->gb, 1);
  1038. }
  1039. tprintf(s->avctx, "dct[%d][%d] = %- 4d end?:%d\n", scan_table[i&63]&7, scan_table[i&63] >> 3, level, i>62);
  1040. if (i > 62) {
  1041. i -= 192;
  1042. if (i & (~63)) {
  1043. av_log(s->avctx, AV_LOG_ERROR,
  1044. "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
  1045. return -1;
  1046. }
  1047. block[scan_table[i]] = level;
  1048. break;
  1049. }
  1050. block[scan_table[i]] = level;
  1051. }
  1052. CLOSE_READER(re, &s->gb);
  1053. }
  1054. not_coded:
  1055. if (intra) {
  1056. if (!ctx->use_intra_dc_vlc) {
  1057. block[0] = ff_mpeg4_pred_dc(s, n, block[0], &dc_pred_dir, 0);
  1058. i -= i >> 31; // if (i == -1) i = 0;
  1059. }
  1060. ff_mpeg4_pred_ac(s, block, n, dc_pred_dir);
  1061. if (s->ac_pred)
  1062. i = 63; // FIXME not optimal
  1063. }
  1064. s->block_last_index[n] = i;
  1065. return 0;
  1066. }
  1067. /**
  1068. * Decode partition C of one MB.
  1069. * @return <0 if an error occurred
  1070. */
  1071. static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
  1072. {
  1073. Mpeg4DecContext *ctx = (Mpeg4DecContext *)s;
  1074. int cbp, mb_type;
  1075. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  1076. mb_type = s->current_picture.mb_type[xy];
  1077. cbp = s->cbp_table[xy];
  1078. ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;
  1079. if (s->current_picture.qscale_table[xy] != s->qscale)
  1080. ff_set_qscale(s, s->current_picture.qscale_table[xy]);
  1081. if (s->pict_type == AV_PICTURE_TYPE_P ||
  1082. s->pict_type == AV_PICTURE_TYPE_S) {
  1083. int i;
  1084. for (i = 0; i < 4; i++) {
  1085. s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
  1086. s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
  1087. }
  1088. s->mb_intra = IS_INTRA(mb_type);
  1089. if (IS_SKIP(mb_type)) {
  1090. /* skip mb */
  1091. for (i = 0; i < 6; i++)
  1092. s->block_last_index[i] = -1;
  1093. s->mv_dir = MV_DIR_FORWARD;
  1094. s->mv_type = MV_TYPE_16X16;
  1095. if (s->pict_type == AV_PICTURE_TYPE_S
  1096. && ctx->vol_sprite_usage == GMC_SPRITE) {
  1097. s->mcsel = 1;
  1098. s->mb_skipped = 0;
  1099. } else {
  1100. s->mcsel = 0;
  1101. s->mb_skipped = 1;
  1102. }
  1103. } else if (s->mb_intra) {
  1104. s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
  1105. } else if (!s->mb_intra) {
  1106. // s->mcsel = 0; // FIXME do we need to init that?
  1107. s->mv_dir = MV_DIR_FORWARD;
  1108. if (IS_8X8(mb_type)) {
  1109. s->mv_type = MV_TYPE_8X8;
  1110. } else {
  1111. s->mv_type = MV_TYPE_16X16;
  1112. }
  1113. }
  1114. } else { /* I-Frame */
  1115. s->mb_intra = 1;
  1116. s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
  1117. }
  1118. if (!IS_SKIP(mb_type)) {
  1119. int i;
  1120. s->bdsp.clear_blocks(s->block[0]);
  1121. /* decode each block */
  1122. for (i = 0; i < 6; i++) {
  1123. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, s->mb_intra, ctx->rvlc) < 0) {
  1124. av_log(s->avctx, AV_LOG_ERROR,
  1125. "texture corrupted at %d %d %d\n",
  1126. s->mb_x, s->mb_y, s->mb_intra);
  1127. return -1;
  1128. }
  1129. cbp += cbp;
  1130. }
  1131. }
  1132. /* per-MB end of slice check */
  1133. if (--s->mb_num_left <= 0) {
  1134. if (mpeg4_is_resync(ctx))
  1135. return SLICE_END;
  1136. else
  1137. return SLICE_NOEND;
  1138. } else {
  1139. if (mpeg4_is_resync(ctx)) {
  1140. const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
  1141. if (s->cbp_table[xy + delta])
  1142. return SLICE_END;
  1143. }
  1144. return SLICE_OK;
  1145. }
  1146. }
  1147. static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
  1148. {
  1149. Mpeg4DecContext *ctx = (Mpeg4DecContext *)s;
  1150. int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
  1151. int16_t *mot_val;
  1152. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
  1153. const int xy = s->mb_x + s->mb_y * s->mb_stride;
  1154. av_assert2(s->h263_pred);
  1155. if (s->pict_type == AV_PICTURE_TYPE_P ||
  1156. s->pict_type == AV_PICTURE_TYPE_S) {
  1157. do {
  1158. if (get_bits1(&s->gb)) {
  1159. /* skip mb */
  1160. s->mb_intra = 0;
  1161. for (i = 0; i < 6; i++)
  1162. s->block_last_index[i] = -1;
  1163. s->mv_dir = MV_DIR_FORWARD;
  1164. s->mv_type = MV_TYPE_16X16;
  1165. if (s->pict_type == AV_PICTURE_TYPE_S &&
  1166. ctx->vol_sprite_usage == GMC_SPRITE) {
  1167. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  1168. MB_TYPE_GMC |
  1169. MB_TYPE_16x16 |
  1170. MB_TYPE_L0;
  1171. s->mcsel = 1;
  1172. s->mv[0][0][0] = get_amv(ctx, 0);
  1173. s->mv[0][0][1] = get_amv(ctx, 1);
  1174. s->mb_skipped = 0;
  1175. } else {
  1176. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  1177. MB_TYPE_16x16 |
  1178. MB_TYPE_L0;
  1179. s->mcsel = 0;
  1180. s->mv[0][0][0] = 0;
  1181. s->mv[0][0][1] = 0;
  1182. s->mb_skipped = 1;
  1183. }
  1184. goto end;
  1185. }
  1186. cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
  1187. if (cbpc < 0) {
  1188. av_log(s->avctx, AV_LOG_ERROR,
  1189. "mcbpc damaged at %d %d\n", s->mb_x, s->mb_y);
  1190. return -1;
  1191. }
  1192. } while (cbpc == 20);
  1193. s->bdsp.clear_blocks(s->block[0]);
  1194. dquant = cbpc & 8;
  1195. s->mb_intra = ((cbpc & 4) != 0);
  1196. if (s->mb_intra)
  1197. goto intra;
  1198. if (s->pict_type == AV_PICTURE_TYPE_S &&
  1199. ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0)
  1200. s->mcsel = get_bits1(&s->gb);
  1201. else
  1202. s->mcsel = 0;
  1203. cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F;
  1204. cbp = (cbpc & 3) | (cbpy << 2);
  1205. if (dquant)
  1206. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  1207. if ((!s->progressive_sequence) &&
  1208. (cbp || (s->workaround_bugs & FF_BUG_XVID_ILACE)))
  1209. s->interlaced_dct = get_bits1(&s->gb);
  1210. s->mv_dir = MV_DIR_FORWARD;
  1211. if ((cbpc & 16) == 0) {
  1212. if (s->mcsel) {
  1213. s->current_picture.mb_type[xy] = MB_TYPE_GMC |
  1214. MB_TYPE_16x16 |
  1215. MB_TYPE_L0;
  1216. /* 16x16 global motion prediction */
  1217. s->mv_type = MV_TYPE_16X16;
  1218. mx = get_amv(ctx, 0);
  1219. my = get_amv(ctx, 1);
  1220. s->mv[0][0][0] = mx;
  1221. s->mv[0][0][1] = my;
  1222. } else if ((!s->progressive_sequence) && get_bits1(&s->gb)) {
  1223. s->current_picture.mb_type[xy] = MB_TYPE_16x8 |
  1224. MB_TYPE_L0 |
  1225. MB_TYPE_INTERLACED;
  1226. /* 16x8 field motion prediction */
  1227. s->mv_type = MV_TYPE_FIELD;
  1228. s->field_select[0][0] = get_bits1(&s->gb);
  1229. s->field_select[0][1] = get_bits1(&s->gb);
  1230. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  1231. for (i = 0; i < 2; i++) {
  1232. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  1233. if (mx >= 0xffff)
  1234. return -1;
  1235. my = ff_h263_decode_motion(s, pred_y / 2, s->f_code);
  1236. if (my >= 0xffff)
  1237. return -1;
  1238. s->mv[0][i][0] = mx;
  1239. s->mv[0][i][1] = my;
  1240. }
  1241. } else {
  1242. s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
  1243. /* 16x16 motion prediction */
  1244. s->mv_type = MV_TYPE_16X16;
  1245. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  1246. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  1247. if (mx >= 0xffff)
  1248. return -1;
  1249. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  1250. if (my >= 0xffff)
  1251. return -1;
  1252. s->mv[0][0][0] = mx;
  1253. s->mv[0][0][1] = my;
  1254. }
  1255. } else {
  1256. s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
  1257. s->mv_type = MV_TYPE_8X8;
  1258. for (i = 0; i < 4; i++) {
  1259. mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  1260. mx = ff_h263_decode_motion(s, pred_x, s->f_code);
  1261. if (mx >= 0xffff)
  1262. return -1;
  1263. my = ff_h263_decode_motion(s, pred_y, s->f_code);
  1264. if (my >= 0xffff)
  1265. return -1;
  1266. s->mv[0][i][0] = mx;
  1267. s->mv[0][i][1] = my;
  1268. mot_val[0] = mx;
  1269. mot_val[1] = my;
  1270. }
  1271. }
  1272. } else if (s->pict_type == AV_PICTURE_TYPE_B) {
  1273. int modb1; // first bit of modb
  1274. int modb2; // second bit of modb
  1275. int mb_type;
  1276. s->mb_intra = 0; // B-frames never contain intra blocks
  1277. s->mcsel = 0; // ... true gmc blocks
  1278. if (s->mb_x == 0) {
  1279. for (i = 0; i < 2; i++) {
  1280. s->last_mv[i][0][0] =
  1281. s->last_mv[i][0][1] =
  1282. s->last_mv[i][1][0] =
  1283. s->last_mv[i][1][1] = 0;
  1284. }
  1285. ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
  1286. }
  1287. /* if we skipped it in the future P-frame then skip it now too */
  1288. s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
  1289. if (s->mb_skipped) {
  1290. /* skip mb */
  1291. for (i = 0; i < 6; i++)
  1292. s->block_last_index[i] = -1;
  1293. s->mv_dir = MV_DIR_FORWARD;
  1294. s->mv_type = MV_TYPE_16X16;
  1295. s->mv[0][0][0] =
  1296. s->mv[0][0][1] =
  1297. s->mv[1][0][0] =
  1298. s->mv[1][0][1] = 0;
  1299. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  1300. MB_TYPE_16x16 |
  1301. MB_TYPE_L0;
  1302. goto end;
  1303. }
  1304. modb1 = get_bits1(&s->gb);
  1305. if (modb1) {
  1306. // like MB_TYPE_B_DIRECT but no vectors coded
  1307. mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1;
  1308. cbp = 0;
  1309. } else {
  1310. modb2 = get_bits1(&s->gb);
  1311. mb_type = get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1);
  1312. if (mb_type < 0) {
  1313. av_log(s->avctx, AV_LOG_ERROR, "illegal MB_type\n");
  1314. return -1;
  1315. }
  1316. mb_type = mb_type_b_map[mb_type];
  1317. if (modb2) {
  1318. cbp = 0;
  1319. } else {
  1320. s->bdsp.clear_blocks(s->block[0]);
  1321. cbp = get_bits(&s->gb, 6);
  1322. }
  1323. if ((!IS_DIRECT(mb_type)) && cbp) {
  1324. if (get_bits1(&s->gb))
  1325. ff_set_qscale(s, s->qscale + get_bits1(&s->gb) * 4 - 2);
  1326. }
  1327. if (!s->progressive_sequence) {
  1328. if (cbp)
  1329. s->interlaced_dct = get_bits1(&s->gb);
  1330. if (!IS_DIRECT(mb_type) && get_bits1(&s->gb)) {
  1331. mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
  1332. mb_type &= ~MB_TYPE_16x16;
  1333. if (USES_LIST(mb_type, 0)) {
  1334. s->field_select[0][0] = get_bits1(&s->gb);
  1335. s->field_select[0][1] = get_bits1(&s->gb);
  1336. }
  1337. if (USES_LIST(mb_type, 1)) {
  1338. s->field_select[1][0] = get_bits1(&s->gb);
  1339. s->field_select[1][1] = get_bits1(&s->gb);
  1340. }
  1341. }
  1342. }
  1343. s->mv_dir = 0;
  1344. if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) {
  1345. s->mv_type = MV_TYPE_16X16;
  1346. if (USES_LIST(mb_type, 0)) {
  1347. s->mv_dir = MV_DIR_FORWARD;
  1348. mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
  1349. my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
  1350. s->last_mv[0][1][0] =
  1351. s->last_mv[0][0][0] =
  1352. s->mv[0][0][0] = mx;
  1353. s->last_mv[0][1][1] =
  1354. s->last_mv[0][0][1] =
  1355. s->mv[0][0][1] = my;
  1356. }
  1357. if (USES_LIST(mb_type, 1)) {
  1358. s->mv_dir |= MV_DIR_BACKWARD;
  1359. mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
  1360. my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
  1361. s->last_mv[1][1][0] =
  1362. s->last_mv[1][0][0] =
  1363. s->mv[1][0][0] = mx;
  1364. s->last_mv[1][1][1] =
  1365. s->last_mv[1][0][1] =
  1366. s->mv[1][0][1] = my;
  1367. }
  1368. } else if (!IS_DIRECT(mb_type)) {
  1369. s->mv_type = MV_TYPE_FIELD;
  1370. if (USES_LIST(mb_type, 0)) {
  1371. s->mv_dir = MV_DIR_FORWARD;
  1372. for (i = 0; i < 2; i++) {
  1373. mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], s->f_code);
  1374. my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, s->f_code);
  1375. s->last_mv[0][i][0] =
  1376. s->mv[0][i][0] = mx;
  1377. s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2;
  1378. }
  1379. }
  1380. if (USES_LIST(mb_type, 1)) {
  1381. s->mv_dir |= MV_DIR_BACKWARD;
  1382. for (i = 0; i < 2; i++) {
  1383. mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], s->b_code);
  1384. my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, s->b_code);
  1385. s->last_mv[1][i][0] =
  1386. s->mv[1][i][0] = mx;
  1387. s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2;
  1388. }
  1389. }
  1390. }
  1391. }
  1392. if (IS_DIRECT(mb_type)) {
  1393. if (IS_SKIP(mb_type)) {
  1394. mx =
  1395. my = 0;
  1396. } else {
  1397. mx = ff_h263_decode_motion(s, 0, 1);
  1398. my = ff_h263_decode_motion(s, 0, 1);
  1399. }
  1400. s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
  1401. mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
  1402. }
  1403. s->current_picture.mb_type[xy] = mb_type;
  1404. } else { /* I-Frame */
  1405. do {
  1406. cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
  1407. if (cbpc < 0) {
  1408. av_log(s->avctx, AV_LOG_ERROR,
  1409. "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
  1410. return -1;
  1411. }
  1412. } while (cbpc == 8);
  1413. dquant = cbpc & 4;
  1414. s->mb_intra = 1;
  1415. intra:
  1416. s->ac_pred = get_bits1(&s->gb);
  1417. if (s->ac_pred)
  1418. s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
  1419. else
  1420. s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
  1421. cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  1422. if (cbpy < 0) {
  1423. av_log(s->avctx, AV_LOG_ERROR,
  1424. "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
  1425. return -1;
  1426. }
  1427. cbp = (cbpc & 3) | (cbpy << 2);
  1428. ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;
  1429. if (dquant)
  1430. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  1431. if (!s->progressive_sequence)
  1432. s->interlaced_dct = get_bits1(&s->gb);
  1433. s->bdsp.clear_blocks(s->block[0]);
  1434. /* decode each block */
  1435. for (i = 0; i < 6; i++) {
  1436. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 1, 0) < 0)
  1437. return -1;
  1438. cbp += cbp;
  1439. }
  1440. goto end;
  1441. }
  1442. /* decode each block */
  1443. for (i = 0; i < 6; i++) {
  1444. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 0, 0) < 0)
  1445. return -1;
  1446. cbp += cbp;
  1447. }
  1448. end:
  1449. /* per-MB end of slice check */
  1450. if (s->codec_id == AV_CODEC_ID_MPEG4) {
  1451. int next = mpeg4_is_resync(ctx);
  1452. if (next) {
  1453. if (s->mb_x + s->mb_y*s->mb_width + 1 > next && (s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
  1454. return -1;
  1455. } else if (s->mb_x + s->mb_y*s->mb_width + 1 >= next)
  1456. return SLICE_END;
  1457. if (s->pict_type == AV_PICTURE_TYPE_B) {
  1458. const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
  1459. ff_thread_await_progress(&s->next_picture_ptr->tf,
  1460. (s->mb_x + delta >= s->mb_width)
  1461. ? FFMIN(s->mb_y + 1, s->mb_height - 1)
  1462. : s->mb_y, 0);
  1463. if (s->next_picture.mbskip_table[xy + delta])
  1464. return SLICE_OK;
  1465. }
  1466. return SLICE_END;
  1467. }
  1468. }
  1469. return SLICE_OK;
  1470. }
  1471. static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb)
  1472. {
  1473. int hours, minutes, seconds;
  1474. if (!show_bits(gb, 23)) {
  1475. av_log(s->avctx, AV_LOG_WARNING, "GOP header invalid\n");
  1476. return -1;
  1477. }
  1478. hours = get_bits(gb, 5);
  1479. minutes = get_bits(gb, 6);
  1480. skip_bits1(gb);
  1481. seconds = get_bits(gb, 6);
  1482. s->time_base = seconds + 60*(minutes + 60*hours);
  1483. skip_bits1(gb);
  1484. skip_bits1(gb);
  1485. return 0;
  1486. }
  1487. static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb)
  1488. {
  1489. s->avctx->profile = get_bits(gb, 4);
  1490. s->avctx->level = get_bits(gb, 4);
  1491. // in Simple profile (0), a coded level of 8 actually means level 0
  1492. if (s->avctx->profile == 0 && s->avctx->level == 8) {
  1493. s->avctx->level = 0;
  1494. }
  1495. return 0;
  1496. }
  1497. static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  1498. {
  1499. MpegEncContext *s = &ctx->m;
  1500. int width, height, vo_ver_id;
  1501. /* vol header */
  1502. skip_bits(gb, 1); /* random access */
  1503. s->vo_type = get_bits(gb, 8);
  1504. if (get_bits1(gb) != 0) { /* is_ol_id */
  1505. vo_ver_id = get_bits(gb, 4); /* vo_ver_id */
  1506. skip_bits(gb, 3); /* vo_priority */
  1507. } else {
  1508. vo_ver_id = 1;
  1509. }
  1510. s->aspect_ratio_info = get_bits(gb, 4);
  1511. if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
  1512. s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
  1513. s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
  1514. } else {
  1515. s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
  1516. }
  1517. if ((s->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
  1518. int chroma_format = get_bits(gb, 2);
  1519. if (chroma_format != CHROMA_420)
  1520. av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");
  1521. s->low_delay = get_bits1(gb);
  1522. if (get_bits1(gb)) { /* vbv parameters */
  1523. get_bits(gb, 15); /* first_half_bitrate */
  1524. skip_bits1(gb); /* marker */
  1525. get_bits(gb, 15); /* latter_half_bitrate */
  1526. skip_bits1(gb); /* marker */
  1527. get_bits(gb, 15); /* first_half_vbv_buffer_size */
  1528. skip_bits1(gb); /* marker */
  1529. get_bits(gb, 3); /* latter_half_vbv_buffer_size */
  1530. get_bits(gb, 11); /* first_half_vbv_occupancy */
  1531. skip_bits1(gb); /* marker */
  1532. get_bits(gb, 15); /* latter_half_vbv_occupancy */
  1533. skip_bits1(gb); /* marker */
  1534. }
  1535. } else {
  1536. /* is setting low delay flag only once the smartest thing to do?
  1537. * low delay detection won't be overridden. */
  1538. if (s->picture_number == 0)
  1539. s->low_delay = 0;
  1540. }
  1541. ctx->shape = get_bits(gb, 2); /* vol shape */
  1542. if (ctx->shape != RECT_SHAPE)
  1543. av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n");
  1544. if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) {
  1545. av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n");
  1546. skip_bits(gb, 4); /* video_object_layer_shape_extension */
  1547. }
  1548. check_marker(gb, "before time_increment_resolution");
  1549. s->avctx->time_base.den = get_bits(gb, 16);
  1550. if (!s->avctx->time_base.den) {
  1551. av_log(s->avctx, AV_LOG_ERROR, "time_base.den==0\n");
  1552. s->avctx->time_base.num = 0;
  1553. return -1;
  1554. }
  1555. ctx->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
  1556. if (ctx->time_increment_bits < 1)
  1557. ctx->time_increment_bits = 1;
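/* time_increment_bits is the number of bits needed to code values up to
 * time_base.den - 1, i.e. the width of the vop_time_increment field. */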
  1558. check_marker(gb, "before fixed_vop_rate");
  1559. if (get_bits1(gb) != 0) /* fixed_vop_rate */
  1560. s->avctx->time_base.num = get_bits(gb, ctx->time_increment_bits);
  1561. else
  1562. s->avctx->time_base.num = 1;
  1563. ctx->t_frame = 0;
  1564. if (ctx->shape != BIN_ONLY_SHAPE) {
  1565. if (ctx->shape == RECT_SHAPE) {
  1566. check_marker(gb, "before width");
  1567. width = get_bits(gb, 13);
  1568. check_marker(gb, "before height");
  1569. height = get_bits(gb, 13);
  1570. check_marker(gb, "after height");
  1571. if (width && height && /* they should be non zero but who knows */
  1572. !(s->width && s->codec_tag == AV_RL32("MP4S"))) {
  1573. if (s->width && s->height &&
  1574. (s->width != width || s->height != height))
  1575. s->context_reinit = 1;
  1576. s->width = width;
  1577. s->height = height;
  1578. }
  1579. }
  1580. s->progressive_sequence =
  1581. s->progressive_frame = get_bits1(gb) ^ 1;
  1582. s->interlaced_dct = 0;
  1583. if (!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO))
  1584. av_log(s->avctx, AV_LOG_INFO, /* OBMC Disable */
  1585. "MPEG4 OBMC not supported (very likely buggy encoder)\n");
  1586. if (vo_ver_id == 1)
  1587. ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */
  1588. else
  1589. ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */
  1590. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  1591. av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n");
  1592. if (ctx->vol_sprite_usage == STATIC_SPRITE ||
  1593. ctx->vol_sprite_usage == GMC_SPRITE) {
  1594. if (ctx->vol_sprite_usage == STATIC_SPRITE) {
  1595. skip_bits(gb, 13); // sprite_width
  1596. skip_bits1(gb); /* marker */
  1597. skip_bits(gb, 13); // sprite_height
  1598. skip_bits1(gb); /* marker */
  1599. skip_bits(gb, 13); // sprite_left
  1600. skip_bits1(gb); /* marker */
  1601. skip_bits(gb, 13); // sprite_top
  1602. skip_bits1(gb); /* marker */
  1603. }
  1604. ctx->num_sprite_warping_points = get_bits(gb, 6);
  1605. if (ctx->num_sprite_warping_points > 3) {
  1606. av_log(s->avctx, AV_LOG_ERROR,
  1607. "%d sprite_warping_points\n",
  1608. ctx->num_sprite_warping_points);
  1609. ctx->num_sprite_warping_points = 0;
  1610. return -1;
  1611. }
  1612. s->sprite_warping_accuracy = get_bits(gb, 2);
  1613. ctx->sprite_brightness_change = get_bits1(gb);
  1614. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  1615. skip_bits1(gb); // low_latency_sprite
  1616. }
  1617. // FIXME sadct disable bit if verid!=1 && shape not rect
  1618. if (get_bits1(gb) == 1) { /* not_8_bit */
  1619. s->quant_precision = get_bits(gb, 4); /* quant_precision */
  1620. if (get_bits(gb, 4) != 8) /* bits_per_pixel */
  1621. av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n");
  1622. if (s->quant_precision != 5)
  1623. av_log(s->avctx, AV_LOG_ERROR,
  1624. "quant precision %d\n", s->quant_precision);
  1625. if (s->quant_precision<3 || s->quant_precision>9) {
  1626. s->quant_precision = 5;
  1627. }
  1628. } else {
  1629. s->quant_precision = 5;
  1630. }
        // FIXME a bunch of grayscale shape things

        if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */
            int i, v;

            /* load default matrices */
            for (i = 0; i < 64; i++) {
                int j = s->idsp.idct_permutation[i];
                v = ff_mpeg4_default_intra_matrix[i];
                s->intra_matrix[j]        = v;
                s->chroma_intra_matrix[j] = v;

                v = ff_mpeg4_default_non_intra_matrix[i];
                s->inter_matrix[j]        = v;
                s->chroma_inter_matrix[j] = v;
            }

            /* load custom intra matrix */
            if (get_bits1(gb)) {
                int last = 0;
                for (i = 0; i < 64; i++) {
                    int j;
                    v = get_bits(gb, 8);
                    if (v == 0)
                        break;

                    last = v;
                    j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
                    s->intra_matrix[j]        = last;
                    s->chroma_intra_matrix[j] = last;
                }

                /* replicate last value */
                for (; i < 64; i++) {
                    int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
                    s->intra_matrix[j]        = last;
                    s->chroma_intra_matrix[j] = last;
                }
            }

            /* load custom non intra matrix */
            if (get_bits1(gb)) {
                int last = 0;
                for (i = 0; i < 64; i++) {
                    int j;
                    v = get_bits(gb, 8);
                    if (v == 0)
                        break;

                    last = v;
                    j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
                    s->inter_matrix[j]        = v;
                    s->chroma_inter_matrix[j] = v;
                }

                /* replicate last value */
                for (; i < 64; i++) {
                    int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
                    s->inter_matrix[j]        = last;
                    s->chroma_inter_matrix[j] = last;
                }
            }

            // FIXME a bunch of grayscale shape things
        }

        if (vo_ver_id != 1)
            s->quarter_sample = get_bits1(gb);
        else
            s->quarter_sample = 0;

        if (get_bits_left(gb) < 4) {
            av_log(s->avctx, AV_LOG_ERROR, "VOL Header truncated\n");
            return AVERROR_INVALIDDATA;
        }

        if (!get_bits1(gb)) {
            int pos               = get_bits_count(gb);
            int estimation_method = get_bits(gb, 2);
            if (estimation_method < 2) {
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* opaque */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* transparent */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* intra_cae */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* inter_cae */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* no_update */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* upsampling */
                }
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* intra_blocks */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* inter_blocks */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* inter4v_blocks */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* not coded blocks */
                }
                if (!check_marker(gb, "in complexity estimation part 1")) {
                    skip_bits_long(gb, pos - get_bits_count(gb));
                    goto no_cplx_est;
                }
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* dct_coeffs */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* dct_lines */
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* vlc_syms */
                    ctx->cplx_estimation_trash_i += 4 * get_bits1(gb);  /* vlc_bits */
                }
                if (!get_bits1(gb)) {
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* apm */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* npm */
                    ctx->cplx_estimation_trash_b += 8 * get_bits1(gb);  /* interpolate_mc_q */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* forwback_mc_q */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* halfpel2 */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* halfpel4 */
                }
                if (!check_marker(gb, "in complexity estimation part 2")) {
                    skip_bits_long(gb, pos - get_bits_count(gb));
                    goto no_cplx_est;
                }
                if (estimation_method == 1) {
                    ctx->cplx_estimation_trash_i += 8 * get_bits1(gb);  /* sadct */
                    ctx->cplx_estimation_trash_p += 8 * get_bits1(gb);  /* qpel */
                }
            } else
                av_log(s->avctx, AV_LOG_ERROR,
                       "Invalid Complexity estimation method %d\n",
                       estimation_method);
        } else {
no_cplx_est:
            ctx->cplx_estimation_trash_i =
            ctx->cplx_estimation_trash_p =
            ctx->cplx_estimation_trash_b = 0;
        }

        ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */

        s->data_partitioning = get_bits1(gb);
        if (s->data_partitioning)
            ctx->rvlc = get_bits1(gb);

        if (vo_ver_id != 1) {
            ctx->new_pred = get_bits1(gb);
            if (ctx->new_pred) {
                av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n");
                skip_bits(gb, 2); /* requested upstream message type */
                skip_bits1(gb);   /* newpred segment type */
            }
            if (get_bits1(gb)) // reduced_res_vop
                av_log(s->avctx, AV_LOG_ERROR,
                       "reduced resolution VOP not supported\n");
        } else {
            ctx->new_pred = 0;
        }

        ctx->scalability = get_bits1(gb);

        if (ctx->scalability) {
            GetBitContext bak = *gb;
            int h_sampling_factor_n;
            int h_sampling_factor_m;
            int v_sampling_factor_n;
            int v_sampling_factor_m;

            skip_bits1(gb);    // hierarchy_type
            skip_bits(gb, 4);  /* ref_layer_id */
            skip_bits1(gb);    /* ref_layer_sampling_dir */
            h_sampling_factor_n = get_bits(gb, 5);
            h_sampling_factor_m = get_bits(gb, 5);
            v_sampling_factor_n = get_bits(gb, 5);
            v_sampling_factor_m = get_bits(gb, 5);
            ctx->enhancement_type = get_bits1(gb);

            if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
                v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
                /* illegal scalability header (VERY broken encoder),
                 * trying to workaround */
                ctx->scalability = 0;
                *gb = bak;
            } else
                av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n");

            // bin shape stuff FIXME
        }
    }

    if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(s->avctx, AV_LOG_DEBUG, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, %s%s%s%s\n",
               s->avctx->time_base.num, s->avctx->time_base.den,
               ctx->time_increment_bits,
               s->quant_precision,
               s->progressive_sequence,
               ctx->scalability ? "scalability " : "", s->quarter_sample ? "qpel " : "",
               s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : "");
    }

    return 0;
}
/**
 * Decode the user data stuff in the header.
 * Also initializes divx/xvid/lavc_version/build.
 */
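/* A sketch of the strings the parser below recognizes (the values are
 * illustrative, not taken from a real stream; the mapping follows the sscanf
 * patterns in the code):
 *   "DivX503b1393p" -> divx_version 503, divx_build 1393, divx_packed set
 *                      (the trailing 'p' marks packed B-frames)
 *   "Lavc55.45.101" -> lavc_build (55 << 16) + (45 << 8) + 101
 *   "XviD0050"      -> xvid_build 50
 */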
static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    char buf[256];
    int i;
    int e;
    int ver = 0, build = 0, ver2 = 0, ver3 = 0;
    char last;

    for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) {
        if (show_bits(gb, 23) == 0)
            break;
        buf[i] = get_bits(gb, 8);
    }
    buf[i] = 0;

    /* divx detection */
    e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
    if (e < 2)
        e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
    if (e >= 2) {
        ctx->divx_version = ver;
        ctx->divx_build   = build;
        s->divx_packed    = e == 3 && last == 'p';

        if (s->divx_packed && !ctx->showed_packed_warning) {
            av_log(s->avctx, AV_LOG_INFO, "Video uses a non-standard and "
                   "wasteful way to store B-frames ('packed B-frames'). "
                   "Consider using a tool like VirtualDub or avidemux to fix it.\n");
            ctx->showed_packed_warning = 1;
        }
    }

    /* libavcodec detection */
    e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
    if (e != 4)
        e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
    if (e != 4) {
        e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
        if (e > 1)
            build = (ver << 16) + (ver2 << 8) + ver3;
    }
    if (e != 4) {
        if (strcmp(buf, "ffmpeg") == 0)
            ctx->lavc_build = 4600;
    }
    if (e == 4)
        ctx->lavc_build = build;

    /* Xvid detection */
    e = sscanf(buf, "XviD%d", &build);
    if (e == 1)
        ctx->xvid_build = build;

    return 0;
}
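/**
 * Translate what was learned about the encoder (codec tag, DivX/Xvid/lavc
 * build numbers) into workaround_bugs flags.
 * @return 1 if the IDCT was switched to the Xvid MMX IDCT, 0 otherwise
 */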
int ff_mpeg4_workaround_bugs(AVCodecContext *avctx)
{
    Mpeg4DecContext *ctx = avctx->priv_data;
    MpegEncContext *s = &ctx->m;

    if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) {
        if (s->stream_codec_tag == AV_RL32("XVID") ||
            s->codec_tag        == AV_RL32("XVID") ||
            s->codec_tag        == AV_RL32("XVIX") ||
            s->codec_tag        == AV_RL32("RMP4") ||
            s->codec_tag        == AV_RL32("ZMP4") ||
            s->codec_tag        == AV_RL32("SIPP"))
            ctx->xvid_build = 0;
    }

    if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1)
        if (s->codec_tag == AV_RL32("DIVX") && s->vo_type == 0 &&
            s->vol_control_parameters == 0)
            ctx->divx_version = 400;  // divx 4

    if (ctx->xvid_build >= 0 && ctx->divx_version >= 0) {
        ctx->divx_version =
        ctx->divx_build   = -1;
    }

    if (s->workaround_bugs & FF_BUG_AUTODETECT) {
        if (s->codec_tag == AV_RL32("XVIX"))
            s->workaround_bugs |= FF_BUG_XVID_ILACE;

        if (s->codec_tag == AV_RL32("UMP4"))
            s->workaround_bugs |= FF_BUG_UMP4;

        if (ctx->divx_version >= 500 && ctx->divx_build < 1814)
            s->workaround_bugs |= FF_BUG_QPEL_CHROMA;

        if (ctx->divx_version > 502 && ctx->divx_build < 1814)
            s->workaround_bugs |= FF_BUG_QPEL_CHROMA2;

        if (ctx->xvid_build <= 3U)
            s->padding_bug_score = 256 * 256 * 256 * 64;

        if (ctx->xvid_build <= 1U)
            s->workaround_bugs |= FF_BUG_QPEL_CHROMA;

        if (ctx->xvid_build <= 12U)
            s->workaround_bugs |= FF_BUG_EDGE;

        if (ctx->xvid_build <= 32U)
            s->workaround_bugs |= FF_BUG_DC_CLIP;

#define SET_QPEL_FUNC(postfix1, postfix2)                                     \
    s->qdsp.put_ ## postfix1        = ff_put_ ## postfix2;                    \
    s->qdsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2;             \
    s->qdsp.avg_ ## postfix1        = ff_avg_ ## postfix2;

        if (ctx->lavc_build < 4653U)
            s->workaround_bugs |= FF_BUG_STD_QPEL;

        if (ctx->lavc_build < 4655U)
            s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;

        if (ctx->lavc_build < 4670U)
            s->workaround_bugs |= FF_BUG_EDGE;

        if (ctx->lavc_build <= 4712U)
            s->workaround_bugs |= FF_BUG_DC_CLIP;

        if (ctx->divx_version >= 0)
            s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;
        if (ctx->divx_version == 501 && ctx->divx_build == 20020416)
            s->padding_bug_score = 256 * 256 * 256 * 64;

        if (ctx->divx_version < 500U)
            s->workaround_bugs |= FF_BUG_EDGE;

        if (ctx->divx_version >= 0)
            s->workaround_bugs |= FF_BUG_HPEL_CHROMA;
    }

    if (s->workaround_bugs & FF_BUG_STD_QPEL) {
        SET_QPEL_FUNC(qpel_pixels_tab[0][5],  qpel16_mc11_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[0][7],  qpel16_mc31_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[0][9],  qpel16_mc12_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[1][5],  qpel8_mc11_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[1][7],  qpel8_mc31_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[1][9],  qpel8_mc12_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c)
        SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c)
    }

    if (avctx->debug & FF_DEBUG_BUGS)
        av_log(s->avctx, AV_LOG_DEBUG,
               "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n",
               s->workaround_bugs, ctx->lavc_build, ctx->xvid_build,
               ctx->divx_version, ctx->divx_build, s->divx_packed ? "p" : "");

#if HAVE_MMX
    if (s->codec_id == AV_CODEC_ID_MPEG4 && ctx->xvid_build >= 0 &&
        avctx->idct_algo == FF_IDCT_AUTO &&
        (av_get_cpu_flags() & AV_CPU_FLAG_MMX)) {
        avctx->idct_algo = FF_IDCT_XVIDMMX;
        ff_dct_common_init(s);
        return 1;
    }
#endif

    return 0;
}
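/**
 * Decode a Video Object Plane (VOP) header: picture type, modulo time base
 * and time increment, coded flag, rounding type, quantizer and f_code/b_code.
 *
 * The VOP time in ticks of 1/time_base.den seconds is reconstructed as
 *     s->time = s->time_base * time_base.den + time_increment
 * so, for example, a stream with time_base.den == 25, one full elapsed second
 * and time_increment == 7 yields s->time == 32.
 *
 * @return 0 on success, FRAME_SKIPPED for a not-coded or unusable VOP,
 *         a negative value on damaged headers
 */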
static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    int time_incr, time_increment;
    int64_t pts;

    s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I;  /* pict type: I = 0 , P = 1 */
    if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay &&
        s->vol_control_parameters == 0 && !(s->flags & CODEC_FLAG_LOW_DELAY)) {
        av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n");
        s->low_delay = 0;
    }
    s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;
    if (s->partitioned_frame)
        s->decode_mb = mpeg4_decode_partitioned_mb;
    else
        s->decode_mb = mpeg4_decode_mb;

    time_incr = 0;
    while (get_bits1(gb) != 0)
        time_incr++;

    check_marker(gb, "before time_increment");

    if (ctx->time_increment_bits == 0 ||
        !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) {
        av_log(s->avctx, AV_LOG_ERROR,
               "hmm, seems the headers are not complete, trying to guess time_increment_bits\n");

        for (ctx->time_increment_bits = 1;
             ctx->time_increment_bits < 16;
             ctx->time_increment_bits++) {
            if (s->pict_type == AV_PICTURE_TYPE_P ||
                (s->pict_type == AV_PICTURE_TYPE_S &&
                 ctx->vol_sprite_usage == GMC_SPRITE)) {
                if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
                    break;
            } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)
                break;
        }

        av_log(s->avctx, AV_LOG_ERROR,
               "my guess is %d bits ;)\n", ctx->time_increment_bits);
        if (s->avctx->time_base.den && 4*s->avctx->time_base.den < 1<<ctx->time_increment_bits) {
            s->avctx->time_base.den = 1<<ctx->time_increment_bits;
        }
    }

    if (IS_3IV1)
        time_increment = get_bits1(gb);  // FIXME investigate further
    else
        time_increment = get_bits(gb, ctx->time_increment_bits);

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_time_base = s->time_base;
        s->time_base     += time_incr;
        s->time = s->time_base * s->avctx->time_base.den + time_increment;
        if (s->workaround_bugs & FF_BUG_UMP4) {
            if (s->time < s->last_non_b_time) {
                /* header is not mpeg-4-compatible, broken encoder,
                 * trying to workaround */
                s->time_base++;
                s->time += s->avctx->time_base.den;
            }
        }
        s->pp_time         = s->time - s->last_non_b_time;
        s->last_non_b_time = s->time;
    } else {
        s->time    = (s->last_time_base + time_incr) * s->avctx->time_base.den + time_increment;
        s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
        if (s->pp_time <= s->pb_time ||
            s->pp_time <= s->pp_time - s->pb_time ||
            s->pp_time <= 0) {
            /* messed up order, maybe after seeking? skipping current b-frame */
            return FRAME_SKIPPED;
        }
        ff_mpeg4_init_direct_mv(s);

        if (ctx->t_frame == 0)
            ctx->t_frame = s->pb_time;
        if (ctx->t_frame == 0)
            ctx->t_frame = 1;  // 1/0 protection
        s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) -
                            ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
        s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) -
                            ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
        if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) {
            s->pb_field_time = 2;
            s->pp_field_time = 4;
            if (!s->progressive_sequence)
                return FRAME_SKIPPED;
        }
    }

    if (s->avctx->time_base.num)
        pts = ROUNDED_DIV(s->time, s->avctx->time_base.num);
    else
        pts = AV_NOPTS_VALUE;
    if (s->avctx->debug & FF_DEBUG_PTS)
        av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %"PRId64"\n",
               pts);

    check_marker(gb, "before vop_coded");

    /* vop coded */
    if (get_bits1(gb) != 1) {
        if (s->avctx->debug & FF_DEBUG_PICT_INFO)
            av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n");
        return FRAME_SKIPPED;
    }
    if (ctx->new_pred)
        decode_new_pred(ctx, gb);

    if (ctx->shape != BIN_ONLY_SHAPE &&
        (s->pict_type == AV_PICTURE_TYPE_P ||
         (s->pict_type == AV_PICTURE_TYPE_S &&
          ctx->vol_sprite_usage == GMC_SPRITE))) {
        /* rounding type for motion estimation */
        s->no_rounding = get_bits1(gb);
    } else {
        s->no_rounding = 0;
    }
    // FIXME reduced res stuff

    if (ctx->shape != RECT_SHAPE) {
        if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
            skip_bits(gb, 13);  /* width */
            skip_bits1(gb);     /* marker */
            skip_bits(gb, 13);  /* height */
            skip_bits1(gb);     /* marker */
            skip_bits(gb, 13);  /* hor_spat_ref */
            skip_bits1(gb);     /* marker */
            skip_bits(gb, 13);  /* ver_spat_ref */
        }
        skip_bits1(gb);  /* change_CR_disable */

        if (get_bits1(gb) != 0)
            skip_bits(gb, 8);  /* constant_alpha_value */
    }

    // FIXME complexity estimation stuff

    if (ctx->shape != BIN_ONLY_SHAPE) {
        skip_bits_long(gb, ctx->cplx_estimation_trash_i);
        if (s->pict_type != AV_PICTURE_TYPE_I)
            skip_bits_long(gb, ctx->cplx_estimation_trash_p);
        if (s->pict_type == AV_PICTURE_TYPE_B)
            skip_bits_long(gb, ctx->cplx_estimation_trash_b);

        if (get_bits_left(gb) < 3) {
            av_log(s->avctx, AV_LOG_ERROR, "Header truncated\n");
            return -1;
        }
        ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)];
        if (!s->progressive_sequence) {
            s->top_field_first = get_bits1(gb);
            s->alternate_scan  = get_bits1(gb);
        } else
            s->alternate_scan = 0;
    }

    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable,   ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable,   ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable,   ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable,   ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
    }

    if (s->pict_type == AV_PICTURE_TYPE_S &&
        (ctx->vol_sprite_usage == STATIC_SPRITE ||
         ctx->vol_sprite_usage == GMC_SPRITE)) {
        if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0)
            return AVERROR_INVALIDDATA;
        if (ctx->sprite_brightness_change)
            av_log(s->avctx, AV_LOG_ERROR,
                   "sprite_brightness_change not supported\n");
        if (ctx->vol_sprite_usage == STATIC_SPRITE)
            av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
    }

    if (ctx->shape != BIN_ONLY_SHAPE) {
        s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision);
        if (s->qscale == 0) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Error, header damaged or not MPEG4 header (qscale=0)\n");
            return -1;  // makes no sense to continue, as there is nothing left from the image then
        }

        if (s->pict_type != AV_PICTURE_TYPE_I) {
            s->f_code = get_bits(gb, 3);  /* fcode_for */
            if (s->f_code == 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "Error, header damaged or not MPEG4 header (f_code=0)\n");
                s->f_code = 1;
                return -1;  // makes no sense to continue, as there is nothing left from the image then
            }
        } else
            s->f_code = 1;

        if (s->pict_type == AV_PICTURE_TYPE_B) {
            s->b_code = get_bits(gb, 3);
            if (s->b_code == 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "Error, header damaged or not MPEG4 header (b_code=0)\n");
                s->b_code = 1;
                return -1;  // makes no sense to continue, as the MV decoding will break very quickly
            }
        } else
            s->b_code = 1;

        if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
            av_log(s->avctx, AV_LOG_DEBUG,
                   "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%"PRId64" tincr:%d\n",
                   s->qscale, s->f_code, s->b_code,
                   s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
                   gb->size_in_bits, s->progressive_sequence, s->alternate_scan,
                   s->top_field_first, s->quarter_sample ? "q" : "h",
                   s->data_partitioning, ctx->resync_marker,
                   ctx->num_sprite_warping_points, s->sprite_warping_accuracy,
                   1 - s->no_rounding, s->vo_type,
                   s->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
                   ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
                   ctx->cplx_estimation_trash_b,
                   s->time,
                   time_increment);
        }

        if (!ctx->scalability) {
            if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I)
                skip_bits1(gb);  // vop shape coding type
        } else {
            if (ctx->enhancement_type) {
                int load_backward_shape = get_bits1(gb);
                if (load_backward_shape)
                    av_log(s->avctx, AV_LOG_ERROR,
                           "load backward shape isn't supported\n");
            }
            skip_bits(gb, 2);  // ref_select_code
        }
    }

    /* detect buggy encoders which don't set the low_delay flag
     * (divx4/xvid/opendivx). Note we cannot detect divx5 without b-frames
     * easily (although it's buggy too) */
    if (s->vo_type == 0 && s->vol_control_parameters == 0 &&
        ctx->divx_version == -1 && s->picture_number == 0) {
        av_log(s->avctx, AV_LOG_WARNING,
               "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
        s->low_delay = 1;
    }

    s->picture_number++;  // better than pic number==0 always ;)

    // FIXME add short header support

    s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
    s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table;

    if (s->workaround_bugs & FF_BUG_EDGE) {
        s->h_edge_pos = s->width;
        s->v_edge_pos = s->height;
    }
    return 0;
}
/**
 * Decode mpeg4 headers.
 * @return <0 if no VOP found (or a damaged one)
 *         FRAME_SKIPPED if a not coded VOP is found
 *         0 if a VOP is found
 */
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    unsigned startcode, v;

    /* search next start code */
    align_get_bits(gb);

    if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) {
        skip_bits(gb, 24);
        if (get_bits(gb, 8) == 0xF0)
            goto end;
    }

    startcode = 0xff;
    for (;;) {
        if (get_bits_count(gb) >= gb->size_in_bits) {
            if (gb->size_in_bits == 8 &&
                (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) {
                av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n", gb->size_in_bits);
                return FRAME_SKIPPED;  // divx bug
            } else
                return -1;  // end of stream
        }

        /* use the bits after the test */
        v = get_bits(gb, 8);
        startcode = ((startcode << 8) | v) & 0xffffffff;

        if ((startcode & 0xFFFFFF00) != 0x100)
            continue;  // no startcode

        if (s->avctx->debug & FF_DEBUG_STARTCODE) {
            av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode);
            if (startcode <= 0x11F)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start");
            else if (startcode <= 0x12F)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start");
            else if (startcode <= 0x13F)
                av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
            else if (startcode <= 0x15F)
                av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start");
            else if (startcode <= 0x1AF)
                av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
            else if (startcode == 0x1B0)
                av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start");
            else if (startcode == 0x1B1)
                av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End");
            else if (startcode == 0x1B2)
                av_log(s->avctx, AV_LOG_DEBUG, "User Data");
            else if (startcode == 0x1B3)
                av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start");
            else if (startcode == 0x1B4)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error");
            else if (startcode == 0x1B5)
                av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start");
            else if (startcode == 0x1B6)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start");
            else if (startcode == 0x1B7)
                av_log(s->avctx, AV_LOG_DEBUG, "slice start");
            else if (startcode == 0x1B8)
                av_log(s->avctx, AV_LOG_DEBUG, "extension start");
            else if (startcode == 0x1B9)
                av_log(s->avctx, AV_LOG_DEBUG, "fgs start");
            else if (startcode == 0x1BA)
                av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start");
            else if (startcode == 0x1BB)
                av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start");
            else if (startcode == 0x1BC)
                av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start");
            else if (startcode == 0x1BD)
                av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start");
            else if (startcode == 0x1BE)
                av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start");
            else if (startcode == 0x1BF)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start");
            else if (startcode == 0x1C0)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start");
            else if (startcode == 0x1C1)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start");
            else if (startcode == 0x1C2)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start");
            else if (startcode == 0x1C3)
                av_log(s->avctx, AV_LOG_DEBUG, "stuffing start");
            else if (startcode <= 0x1C5)
                av_log(s->avctx, AV_LOG_DEBUG, "reserved");
            else if (startcode <= 0x1FF)
                av_log(s->avctx, AV_LOG_DEBUG, "System start");
            av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb));
        }

        if (startcode >= 0x120 && startcode <= 0x12F) {
            if (decode_vol_header(ctx, gb) < 0)
                return -1;
        } else if (startcode == USER_DATA_STARTCODE) {
            decode_user_data(ctx, gb);
        } else if (startcode == GOP_STARTCODE) {
            mpeg4_decode_gop_header(s, gb);
        } else if (startcode == VOS_STARTCODE) {
            mpeg4_decode_profile_level(s, gb);
        } else if (startcode == VOP_STARTCODE) {
            break;
        }

        align_get_bits(gb);
        startcode = 0xff;
    }

end:
    if (s->flags & CODEC_FLAG_LOW_DELAY)
        s->low_delay = 1;
    s->avctx->has_b_frames = !s->low_delay;

    return decode_vop_header(ctx, gb);
}
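/**
 * Initialize the static RL and VLC tables shared by all MPEG4 decoder
 * instances. Subsequent calls are no-ops thanks to the 'done' flag, so the
 * function can simply be called from every decode_init().
 */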
av_cold void ff_mpeg4videodec_static_init(void) {
    static int done = 0;

    if (!done) {
        ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
        ff_init_rl(&ff_rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
        ff_init_rl(&ff_rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
        INIT_VLC_RL(ff_mpeg4_rl_intra, 554);
        INIT_VLC_RL(ff_rvlc_rl_inter, 1072);
        INIT_VLC_RL(ff_rvlc_rl_intra, 1072);
        INIT_VLC_STATIC(&dc_lum, DC_VLC_BITS, 10 /* 13 */,
                        &ff_mpeg4_DCtab_lum[0][1], 2, 1,
                        &ff_mpeg4_DCtab_lum[0][0], 2, 1, 512);
        INIT_VLC_STATIC(&dc_chrom, DC_VLC_BITS, 10 /* 13 */,
                        &ff_mpeg4_DCtab_chrom[0][1], 2, 1,
                        &ff_mpeg4_DCtab_chrom[0][0], 2, 1, 512);
        INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15,
                        &ff_sprite_trajectory_tab[0][1], 4, 2,
                        &ff_sprite_trajectory_tab[0][0], 4, 2, 128);
        INIT_VLC_STATIC(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4,
                        &ff_mb_type_b_tab[0][1], 2, 1,
                        &ff_mb_type_b_tab[0][0], 2, 1, 16);
        done = 1;
    }
}
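/**
 * Handle the end of a decoded frame. For DivX-style "packed B-frames" the
 * packet may contain a second VOP after the one that was just decoded; if a
 * trailing VOP startcode (00 00 01 B6) of a suitable coding type is found,
 * the rest of the packet is stashed in s->bitstream_buffer so the next decode
 * call can pick it up.
 */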
int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
{
    Mpeg4DecContext *ctx = avctx->priv_data;
    MpegEncContext *s = &ctx->m;

    /* divx 5.01+ bitstream reorder stuff */
    /* Since this clobbers the input buffer and hwaccel codecs still need the
     * data during hwaccel->end_frame we should not do this any earlier */
    if (s->divx_packed) {
        int current_pos     = s->gb.buffer == s->bitstream_buffer ? 0 : (get_bits_count(&s->gb) >> 3);
        int startcode_found = 0;

        if (buf_size - current_pos > 7) {
            int i;
            for (i = current_pos; i < buf_size - 4; i++)
                if (buf[i]     == 0 &&
                    buf[i + 1] == 0 &&
                    buf[i + 2] == 1 &&
                    buf[i + 3] == 0xB6) {
                    startcode_found = !(buf[i + 4] & 0x40);
                    break;
                }
        }

        if (startcode_found) {
            av_fast_padded_malloc(&s->bitstream_buffer,
                                  &s->allocated_bitstream_buffer_size,
                                  buf_size - current_pos);
            if (!s->bitstream_buffer)
                return AVERROR(ENOMEM);
            memcpy(s->bitstream_buffer, buf + current_pos,
                   buf_size - current_pos);
            s->bitstream_buffer_size = buf_size - current_pos;
        }
    }

    return 0;
}
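/**
 * Frame-threading context update: ff_mpeg_update_thread_context() syncs the
 * embedded MpegEncContext, after which the MPEG4-specific fields that follow
 * it in Mpeg4DecContext are copied over verbatim.
 */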
static int mpeg4_update_thread_context(AVCodecContext *dst,
                                       const AVCodecContext *src)
{
    Mpeg4DecContext *s = dst->priv_data;
    const Mpeg4DecContext *s1 = src->priv_data;
    int ret = ff_mpeg_update_thread_context(dst, src);

    if (ret < 0)
        return ret;

    memcpy(((uint8_t*)s) + sizeof(MpegEncContext), ((uint8_t*)s1) + sizeof(MpegEncContext), sizeof(Mpeg4DecContext) - sizeof(MpegEncContext));

    return 0;
}
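/**
 * Set up an MPEG4 decoder instance: generic ff_h263_decode_init() plus
 * MPEG4-specific defaults (h263_pred, low_delay cleared until the VOL header
 * overrides it, time_increment_bits defaulting to 4 for broken headers).
 */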
static av_cold int decode_init(AVCodecContext *avctx)
{
    Mpeg4DecContext *ctx = avctx->priv_data;
    MpegEncContext *s = &ctx->m;
    int ret;

    ctx->divx_version =
    ctx->divx_build   =
    ctx->xvid_build   =
    ctx->lavc_build   = -1;

    if ((ret = ff_h263_decode_init(avctx)) < 0)
        return ret;

    ff_mpeg4videodec_static_init();

    s->h263_pred = 1;
    s->low_delay = 0; /* default, might be overridden in the vol header during header parsing */
    s->decode_mb = mpeg4_decode_mb;
    ctx->time_increment_bits = 4; /* default value for broken headers */

    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
    avctx->internal->allocate_progress = 1;

    return 0;
}
static const AVProfile mpeg4_video_profiles[] = {
    { FF_PROFILE_MPEG4_SIMPLE,                    "Simple Profile" },
    { FF_PROFILE_MPEG4_SIMPLE_SCALABLE,           "Simple Scalable Profile" },
    { FF_PROFILE_MPEG4_CORE,                      "Core Profile" },
    { FF_PROFILE_MPEG4_MAIN,                      "Main Profile" },
    { FF_PROFILE_MPEG4_N_BIT,                     "N-bit Profile" },
    { FF_PROFILE_MPEG4_SCALABLE_TEXTURE,          "Scalable Texture Profile" },
    { FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION,     "Simple Face Animation Profile" },
    { FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE,    "Basic Animated Texture Profile" },
    { FF_PROFILE_MPEG4_HYBRID,                    "Hybrid Profile" },
    { FF_PROFILE_MPEG4_ADVANCED_REAL_TIME,        "Advanced Real Time Simple Profile" },
    { FF_PROFILE_MPEG4_CORE_SCALABLE,             "Core Scalable Profile" },
    { FF_PROFILE_MPEG4_ADVANCED_CODING,           "Advanced Coding Profile" },
    { FF_PROFILE_MPEG4_ADVANCED_CORE,             "Advanced Core Profile" },
    { FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE, "Advanced Scalable Texture Profile" },
    { FF_PROFILE_MPEG4_SIMPLE_STUDIO,             "Simple Studio Profile" },
    { FF_PROFILE_MPEG4_ADVANCED_SIMPLE,           "Advanced Simple Profile" },
    { FF_PROFILE_UNKNOWN },
};
static const AVOption mpeg4_options[] = {
    {"quarter_sample", "1/4 subpel MC", offsetof(MpegEncContext, quarter_sample), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
    {"divx_packed", "divx style packed b frames", offsetof(MpegEncContext, divx_packed), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
    {NULL}
};
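/* A minimal usage sketch for the private options above (the option names are
 * the ones declared in mpeg4_options; the rest is generic libavcodec API):
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "divx_packed", "1", 0);   // force packed B-frame handling
 *     avcodec_open2(avctx, avcodec_find_decoder(AV_CODEC_ID_MPEG4), &opts);
 *     av_dict_free(&opts);
 */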
static const AVClass mpeg4_class = {
    "MPEG4 Video Decoder",
    av_default_item_name,
    mpeg4_options,
    LIBAVUTIL_VERSION_INT,
};

static const AVClass mpeg4_vdpau_class = {
    "MPEG4 Video VDPAU Decoder",
    av_default_item_name,
    mpeg4_options,
    LIBAVUTIL_VERSION_INT,
};
AVCodec ff_mpeg4_decoder = {
    .name                  = "mpeg4",
    .long_name             = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_MPEG4,
    .priv_data_size        = sizeof(Mpeg4DecContext),
    .init                  = decode_init,
    .close                 = ff_h263_decode_end,
    .decode                = ff_h263_decode_frame,
    .capabilities          = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
                             CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
                             CODEC_CAP_FRAME_THREADS,
    .flush                 = ff_mpeg_flush,
    .max_lowres            = 3,
    .pix_fmts              = ff_h263_hwaccel_pixfmt_list_420,
    .profiles              = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context),
    .priv_class            = &mpeg4_class,
};
#if CONFIG_MPEG4_VDPAU_DECODER
AVCodec ff_mpeg4_vdpau_decoder = {
    .name           = "mpeg4_vdpau",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = decode_init,
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
                      CODEC_CAP_HWACCEL_VDPAU,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_MPEG4,
                                                     AV_PIX_FMT_NONE },
    .priv_class     = &mpeg4_vdpau_class,
};
#endif