/*
 * MPEG-4 decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define UNCHECKED_BITSTREAM_READER 1

#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "error_resilience.h"
#include "idctdsp.h"
#include "internal.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mpeg4video.h"
#include "h263.h"
#include "profiles.h"
#include "thread.h"
#include "xvididct.h"
/* The defines below define the number of bits that are read at once for
 * reading vlc values. Changing these may improve speed and data cache needs;
 * be aware, though, that decreasing them may require increasing the number
 * of stages that is passed to get_vlc*. */
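/* For reference: get_vlc2() resolves a code with up to max_depth table lookups
 * of the given number of bits (hence the 1, 2 or 3 passed as its last argument
 * in the calls below), so larger *_VLC_BITS values mean fewer lookup stages at
 * the cost of larger tables. */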
#define SPRITE_TRAJ_VLC_BITS 6
#define DC_VLC_BITS 9
#define MB_TYPE_B_VLC_BITS 4

static VLC dc_lum, dc_chrom;
static VLC sprite_trajectory;
static VLC mb_type_b_vlc;

static const int mb_type_b_map[4] = {
    MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
    MB_TYPE_L0L1    | MB_TYPE_16x16,
    MB_TYPE_L1      | MB_TYPE_16x16,
    MB_TYPE_L0      | MB_TYPE_16x16,
};
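/* The four entries above correspond, in order, to the B-frame macroblock VLC
 * indices: direct (no explicit vectors), bidirectional 16x16, backward 16x16
 * and forward 16x16. */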

/**
 * Predict the ac.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 * @param dir the ac prediction direction
 */
void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir)
{
    int i;
    int16_t *ac_val, *ac_val1;
    int8_t *const qscale_table = s->current_picture.qscale_table;
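
    /* AC prediction adds the first row or column of the selected neighbouring
     * block to this block's first row or column. If the neighbour was coded
     * with a different qscale, its stored coefficients are rescaled first;
     * with illustrative values, a stored coefficient of 10 from a neighbour
     * coded at qscale 4 contributes ROUNDED_DIV(10 * 4, 8) = 5 when the
     * current block uses qscale 8. */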
    /* find prediction */
    ac_val  = &s->ac_val[0][0][0] + s->block_index[n] * 16;
    ac_val1 = ac_val;
    if (s->ac_pred) {
        if (dir == 0) {
            const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
            /* left prediction */
            ac_val -= 16;

            if (s->mb_x == 0 || s->qscale == qscale_table[xy] ||
                n == 1 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++)
                    block[s->idsp.idct_permutation[i << 3]] += ac_val[i];
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++)
                    block[s->idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
            }
        } else {
            const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
            /* top prediction */
            ac_val -= 16 * s->block_wrap[n];

            if (s->mb_y == 0 || s->qscale == qscale_table[xy] ||
                n == 2 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++)
                    block[s->idsp.idct_permutation[i]] += ac_val[i + 8];
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++)
                    block[s->idsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
            }
        }
    }
    /* left copy */
    for (i = 1; i < 8; i++)
        ac_val1[i] = block[s->idsp.idct_permutation[i << 3]];

    /* top copy */
    for (i = 1; i < 8; i++)
        ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
}

/**
 * Check if the next stuff is a resync marker or the end.
 * @return 0 if not
 */
static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)
{
    MpegEncContext *s = &ctx->m;
    int bits_count = get_bits_count(&s->gb);
    int v          = show_bits(&s->gb, 16);

    if (s->workaround_bugs & FF_BUG_NO_PADDING && !ctx->resync_marker)
        return 0;
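
    /* Skip over runs of zero bytes: a resync marker is, roughly, a run of zero
     * bits whose length depends on the f_code (see
     * ff_mpeg4_get_video_packet_prefix_length()), followed by a 1 bit and the
     * macroblock number of the packet; the code below scans for and then
     * validates such a marker without consuming it. */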
    while (v <= 0xFF) {
        if (s->pict_type == AV_PICTURE_TYPE_B ||
            (v >> (8 - s->pict_type) != 1) || s->partitioned_frame)
            break;
        skip_bits(&s->gb, 8 + s->pict_type);
        bits_count += 8 + s->pict_type;
        v = show_bits(&s->gb, 16);
    }

    if (bits_count + 8 >= s->gb.size_in_bits) {
        v >>= 8;
        v  |= 0x7F >> (7 - (bits_count & 7));

        if (v == 0x7F)
            return s->mb_num;
    } else {
        if (v == ff_mpeg4_resync_prefix[bits_count & 7]) {
            int len, mb_num;
            int mb_num_bits = av_log2(s->mb_num - 1) + 1;
            GetBitContext gb = s->gb;

            skip_bits(&s->gb, 1);
            align_get_bits(&s->gb);

            for (len = 0; len < 32; len++)
                if (get_bits1(&s->gb))
                    break;

            mb_num = get_bits(&s->gb, mb_num_bits);
            if (!mb_num || mb_num > s->mb_num || get_bits_count(&s->gb) + 6 > s->gb.size_in_bits)
                mb_num = -1;

            s->gb = gb;

            if (len >= ff_mpeg4_get_video_packet_prefix_length(s))
                return mb_num;
        }
    }
    return 0;
}

static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    int a     = 2 << s->sprite_warping_accuracy;
    int rho   = 3 - s->sprite_warping_accuracy;
    int r     = 16 / a;
    int alpha = 0;
    int beta  = 0;
    int w     = s->width;
    int h     = s->height;
    int min_ab, i, w2, h2, w3, h3;
    int sprite_ref[4][2];
    int virtual_ref[2][2];
    int64_t sprite_offset[2][2];

    // only true for rectangle shapes
    const int vop_ref[4][2] = { { 0, 0 },         { s->width, 0 },
                                { 0, s->height }, { s->width, s->height } };
    int d[4][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };

    if (w <= 0 || h <= 0)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < ctx->num_sprite_warping_points; i++) {
        int length;
        int x = 0, y = 0;

        length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
        if (length > 0)
            x = get_xbits(gb, length);

        if (!(ctx->divx_version == 500 && ctx->divx_build == 413))
            check_marker(s->avctx, gb, "before sprite_trajectory");

        length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
        if (length > 0)
            y = get_xbits(gb, length);

        check_marker(s->avctx, gb, "after sprite_trajectory");
        ctx->sprite_traj[i][0] = d[i][0] = x;
        ctx->sprite_traj[i][1] = d[i][1] = y;
    }
    for (; i < 4; i++)
        ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0;

    while ((1 << alpha) < w)
        alpha++;
    while ((1 << beta) < h)
        beta++;  /* typo in the MPEG-4 std for the definition of w' and h' */
    w2 = 1 << alpha;
    h2 = 1 << beta;

    // Note, the 4th point isn't used for GMC
    if (ctx->divx_version == 500 && ctx->divx_build == 413) {
        sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0];
        sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1];
        sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0];
        sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1];
        sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0];
        sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1];
    } else {
        sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]);
        sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]);
        sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]);
        sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]);
        sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]);
        sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]);
    }
    /* sprite_ref[3][0] = (a >> 1) * (2 * vop_ref[3][0] + d[0][0] + d[1][0] + d[2][0] + d[3][0]);
     * sprite_ref[3][1] = (a >> 1) * (2 * vop_ref[3][1] + d[0][1] + d[1][1] + d[2][1] + d[3][1]); */

    /* This is mostly identical to the MPEG-4 std (and is totally unreadable
     * because of that...). Perhaps it should be reordered to be more readable.
     * The idea behind this virtual_ref mess is to be able to use shifts later
     * per pixel instead of divides so the distance between points is converted
     * from w&h based to w2&h2 based which are of the 2^x form. */
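    /* For example, a 720-pixel-wide VOP gives alpha = 10 and w2 = 1024 above,
     * so the per-pixel computations below can shift by a power of two instead
     * of dividing by the real width. */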
    virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) +
                        ROUNDED_DIV(((w - w2) *
                                     (r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
                                     w2 * (r * sprite_ref[1][0] - 16 * vop_ref[1][0])), w);
    virtual_ref[0][1] = 16 * vop_ref[0][1] +
                        ROUNDED_DIV(((w - w2) *
                                     (r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
                                     w2 * (r * sprite_ref[1][1] - 16 * vop_ref[1][1])), w);
    virtual_ref[1][0] = 16 * vop_ref[0][0] +
                        ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16 * vop_ref[0][0]) +
                                     h2 * (r * sprite_ref[2][0] - 16 * vop_ref[2][0])), h);
    virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) +
                        ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16 * vop_ref[0][1]) +
                                     h2 * (r * sprite_ref[2][1] - 16 * vop_ref[2][1])), h);

    switch (ctx->num_sprite_warping_points) {
    case 0:
        sprite_offset[0][0] =
        sprite_offset[0][1] =
        sprite_offset[1][0] =
        sprite_offset[1][1] = 0;
        s->sprite_delta[0][0] = a;
        s->sprite_delta[0][1] =
        s->sprite_delta[1][0] = 0;
        s->sprite_delta[1][1] = a;
        ctx->sprite_shift[0] =
        ctx->sprite_shift[1] = 0;
        break;
    case 1:  // GMC only
        sprite_offset[0][0] = sprite_ref[0][0] - a * vop_ref[0][0];
        sprite_offset[0][1] = sprite_ref[0][1] - a * vop_ref[0][1];
        sprite_offset[1][0] = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) -
                              a * (vop_ref[0][0] / 2);
        sprite_offset[1][1] = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) -
                              a * (vop_ref[0][1] / 2);
        s->sprite_delta[0][0] = a;
        s->sprite_delta[0][1] =
        s->sprite_delta[1][0] = 0;
        s->sprite_delta[1][1] = a;
        ctx->sprite_shift[0] =
        ctx->sprite_shift[1] = 0;
        break;
    case 2:
        sprite_offset[0][0] = ((int64_t) sprite_ref[0][0] * (1 << alpha + rho)) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
                              ((int64_t) -vop_ref[0][0]) +
                              ((int64_t)  r * sprite_ref[0][1] - virtual_ref[0][1]) *
                              ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1));
        sprite_offset[0][1] = ((int64_t) sprite_ref[0][1] * (1 << alpha + rho)) +
                              ((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) *
                              ((int64_t) -vop_ref[0][0]) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
                              ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1));
        sprite_offset[1][0] = (((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
                               ((int64_t) -2 * vop_ref[0][0] + 1) +
                               ((int64_t)  r * sprite_ref[0][1] - virtual_ref[0][1]) *
                               ((int64_t) -2 * vop_ref[0][1] + 1) + 2 * w2 * r *
                               (int64_t) sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1)));
        sprite_offset[1][1] = (((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) *
                               ((int64_t) -2 * vop_ref[0][0] + 1) +
                               ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) *
                               ((int64_t) -2 * vop_ref[0][1] + 1) + 2 * w2 * r *
                               (int64_t) sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1)));
        s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);
        s->sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]);
        s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]);
        s->sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]);

        ctx->sprite_shift[0] = alpha + rho;
        ctx->sprite_shift[1] = alpha + rho + 2;
        break;
    case 3:
        min_ab = FFMIN(alpha, beta);
        w3     = w2 >> min_ab;
        h3     = h2 >> min_ab;
        sprite_offset[0][0] = ((int64_t) sprite_ref[0][0] * (1 << (alpha + beta + rho - min_ab))) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-vop_ref[0][0]) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-vop_ref[0][1]) +
                              ((int64_t) 1 << (alpha + beta + rho - min_ab - 1));
        sprite_offset[0][1] = ((int64_t) sprite_ref[0][1] * (1 << (alpha + beta + rho - min_ab))) +
                              ((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-vop_ref[0][0]) +
                              ((int64_t) -r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-vop_ref[0][1]) +
                              ((int64_t) 1 << (alpha + beta + rho - min_ab - 1));
        sprite_offset[1][0] = ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-2 * vop_ref[0][0] + 1) +
                              ((int64_t) -r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-2 * vop_ref[0][1] + 1) +
                              (int64_t) 2 * w2 * h3 * r * sprite_ref[0][0] - 16 * w2 * h3 +
                              ((int64_t) 1 << (alpha + beta + rho - min_ab + 1));
        sprite_offset[1][1] = ((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-2 * vop_ref[0][0] + 1) +
                              ((int64_t) -r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-2 * vop_ref[0][1] + 1) +
                              (int64_t) 2 * w2 * h3 * r * sprite_ref[0][1] - 16 * w2 * h3 +
                              ((int64_t) 1 << (alpha + beta + rho - min_ab + 1));
        s->sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3;
        s->sprite_delta[0][1] = (-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3;
        s->sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3;
        s->sprite_delta[1][1] = (-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3;

        ctx->sprite_shift[0] = alpha + beta + rho - min_ab;
        ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2;
        break;
    }
    /* try to simplify the situation */
    if (s->sprite_delta[0][0] == a << ctx->sprite_shift[0] &&
        s->sprite_delta[0][1] == 0 &&
        s->sprite_delta[1][0] == 0 &&
        s->sprite_delta[1][1] == a << ctx->sprite_shift[0]) {
        sprite_offset[0][0] >>= ctx->sprite_shift[0];
        sprite_offset[0][1] >>= ctx->sprite_shift[0];
        sprite_offset[1][0] >>= ctx->sprite_shift[1];
        sprite_offset[1][1] >>= ctx->sprite_shift[1];
        s->sprite_delta[0][0] = a;
        s->sprite_delta[0][1] = 0;
        s->sprite_delta[1][0] = 0;
        s->sprite_delta[1][1] = a;
        ctx->sprite_shift[0] = 0;
        ctx->sprite_shift[1] = 0;
        s->real_sprite_warping_points = 1;
    } else {
        int shift_y = 16 - ctx->sprite_shift[0];
        int shift_c = 16 - ctx->sprite_shift[1];

        for (i = 0; i < 2; i++) {
            if (shift_c < 0 || shift_y < 0 ||
                FFABS(  sprite_offset[0][i]) >= INT_MAX >> shift_y ||
                FFABS(  sprite_offset[1][i]) >= INT_MAX >> shift_c ||
                FFABS(s->sprite_delta[0][i]) >= INT_MAX >> shift_y ||
                FFABS(s->sprite_delta[1][i]) >= INT_MAX >> shift_y
            ) {
                avpriv_request_sample(s->avctx, "Too large sprite shift, delta or offset");
                goto overflow;
            }
        }

        for (i = 0; i < 2; i++) {
            sprite_offset[0][i]   *= 1 << shift_y;
            sprite_offset[1][i]   *= 1 << shift_c;
            s->sprite_delta[0][i] *= 1 << shift_y;
            s->sprite_delta[1][i] *= 1 << shift_y;
            ctx->sprite_shift[i]   = 16;
        }
        for (i = 0; i < 2; i++) {
            int64_t sd[2] = {
                s->sprite_delta[i][0] - a * (1LL << 16),
                s->sprite_delta[i][1] - a * (1LL << 16)
            };

            if (llabs(sprite_offset[0][i] + s->sprite_delta[i][0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + s->sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + s->sprite_delta[i][0] * (w+16LL) + s->sprite_delta[i][1] * (h+16LL)) >= INT_MAX ||
                llabs(s->sprite_delta[i][0] * (w+16LL)) >= INT_MAX ||
                llabs(s->sprite_delta[i][1] * (w+16LL)) >= INT_MAX ||
                llabs(sd[0]) >= INT_MAX ||
                llabs(sd[1]) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[0] * (w+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[1] * (h+16LL)) >= INT_MAX ||
                llabs(sprite_offset[0][i] + sd[0] * (w+16LL) + sd[1] * (h+16LL)) >= INT_MAX
            ) {
                avpriv_request_sample(s->avctx, "Overflow on sprite points");
                goto overflow;
            }
        }
        s->real_sprite_warping_points = ctx->num_sprite_warping_points;
    }

    s->sprite_offset[0][0] = sprite_offset[0][0];
    s->sprite_offset[0][1] = sprite_offset[0][1];
    s->sprite_offset[1][0] = sprite_offset[1][0];
    s->sprite_offset[1][1] = sprite_offset[1][1];

    return 0;

overflow:
    memset(s->sprite_offset, 0, sizeof(s->sprite_offset));
    memset(s->sprite_delta, 0, sizeof(s->sprite_delta));
    return AVERROR_PATCHWELCOME;
}

static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    int len = FFMIN(ctx->time_increment_bits + 3, 15);

    get_bits(gb, len);
    if (get_bits1(gb))
        get_bits(gb, len);
    check_marker(s->avctx, gb, "after new_pred");

    return 0;
}

/**
 * Decode the next video packet.
 * @return <0 if something went wrong
 */
int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
{
    MpegEncContext *s = &ctx->m;

    int mb_num_bits      = av_log2(s->mb_num - 1) + 1;
    int header_extension = 0, mb_num, len;

    /* is there enough space left for a video packet + header */
    if (get_bits_count(&s->gb) > s->gb.size_in_bits - 20)
        return -1;

    for (len = 0; len < 32; len++)
        if (get_bits1(&s->gb))
            break;

    if (len != ff_mpeg4_get_video_packet_prefix_length(s)) {
        av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n");
        return -1;
    }

    if (ctx->shape != RECT_SHAPE) {
        header_extension = get_bits1(&s->gb);
        // FIXME more stuff here
    }

    mb_num = get_bits(&s->gb, mb_num_bits);
    if (mb_num >= s->mb_num) {
        av_log(s->avctx, AV_LOG_ERROR,
               "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);
        return -1;
    }

    s->mb_x = mb_num % s->mb_width;
    s->mb_y = mb_num / s->mb_width;

    if (ctx->shape != BIN_ONLY_SHAPE) {
        int qscale = get_bits(&s->gb, s->quant_precision);
        if (qscale)
            s->chroma_qscale = s->qscale = qscale;
    }

    if (ctx->shape == RECT_SHAPE)
        header_extension = get_bits1(&s->gb);

    if (header_extension) {
        int time_incr = 0;

        while (get_bits1(&s->gb) != 0)
            time_incr++;
        check_marker(s->avctx, &s->gb, "before time_increment in video packet header");
        skip_bits(&s->gb, ctx->time_increment_bits); /* time_increment */
        check_marker(s->avctx, &s->gb, "before vop_coding_type in video packet header");
        skip_bits(&s->gb, 2); /* vop coding type */
        // FIXME not rect stuff here

        if (ctx->shape != BIN_ONLY_SHAPE) {
            skip_bits(&s->gb, 3); /* intra dc vlc threshold */
            // FIXME don't just ignore everything
            if (s->pict_type == AV_PICTURE_TYPE_S &&
                ctx->vol_sprite_usage == GMC_SPRITE) {
                if (mpeg4_decode_sprite_trajectory(ctx, &s->gb) < 0)
                    return AVERROR_INVALIDDATA;
                av_log(s->avctx, AV_LOG_ERROR, "untested\n");
            }

            // FIXME reduced res stuff here

            if (s->pict_type != AV_PICTURE_TYPE_I) {
                int f_code = get_bits(&s->gb, 3); /* fcode_for */
                if (f_code == 0)
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, video packet header damaged (f_code=0)\n");
            }
            if (s->pict_type == AV_PICTURE_TYPE_B) {
                int b_code = get_bits(&s->gb, 3);
                if (b_code == 0)
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, video packet header damaged (b_code=0)\n");
            }
        }
    }
    if (ctx->new_pred)
        decode_new_pred(ctx, &s->gb);

    return 0;
}

/**
 * Get the average motion vector for a GMC MB.
 * @param n either 0 for the x component or 1 for y
 * @return the average MV for a GMC MB
 */
static inline int get_amv(Mpeg4DecContext *ctx, int n)
{
    MpegEncContext *s = &ctx->m;
    int x, y, mb_v, sum, dx, dy, shift;
    int len = 1 << (s->f_code + 4);
    const int a = s->sprite_warping_accuracy;

    if (s->workaround_bugs & FF_BUG_AMV)
        len >>= s->quarter_sample;

    if (s->real_sprite_warping_points == 1) {
        if (ctx->divx_version == 500 && ctx->divx_build == 413)
            sum = s->sprite_offset[0][n] / (1 << (a - s->quarter_sample));
        else
            sum = RSHIFT(s->sprite_offset[0][n] * (1 << s->quarter_sample), a);
    } else {
        dx    = s->sprite_delta[n][0];
        dy    = s->sprite_delta[n][1];
        shift = ctx->sprite_shift[0];
        if (n)
            dy -= 1 << (shift + a + 1);
        else
            dx -= 1 << (shift + a + 1);
        mb_v = s->sprite_offset[0][n] + dx * s->mb_x * 16 + dy * s->mb_y * 16;

        sum = 0;
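        /* Accumulate the per-pixel GMC vector over all 16 * 16 = 256 pixels of
         * the macroblock; the final RSHIFT by (a + 8 - quarter_sample) folds
         * the division by 256 (2^8) into the precision shift. */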
        for (y = 0; y < 16; y++) {
            int v;

            v = mb_v + dy * y;
            // FIXME optimize
            for (x = 0; x < 16; x++) {
                sum += v >> shift;
                v   += dx;
            }
        }
        sum = RSHIFT(sum, a + 8 - s->quarter_sample);
    }

    if (sum < -len)
        sum = -len;
    else if (sum >= len)
        sum = len - 1;

    return sum;
}

/**
 * Decode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 * @param dir_ptr the prediction direction will be stored here
 * @return the quantized dc
 */
static inline int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr)
{
    int level, code;

    if (n < 4)
        code = get_vlc2(&s->gb, dc_lum.table, DC_VLC_BITS, 1);
    else
        code = get_vlc2(&s->gb, dc_chrom.table, DC_VLC_BITS, 1);

    if (code < 0 || code > 9 /* && s->nbit < 9 */) {
        av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
        return -1;
    }

    if (code == 0) {
        level = 0;
    } else {
        if (IS_3IV1) {
            if (code == 1)
                level = 2 * get_bits1(&s->gb) - 1;
            else {
                if (get_bits1(&s->gb))
                    level = get_bits(&s->gb, code - 1) + (1 << (code - 1));
                else
                    level = -get_bits(&s->gb, code - 1) - (1 << (code - 1));
            }
        } else {
            level = get_xbits(&s->gb, code);
        }
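
        /* DC size codes larger than 8 are followed by a marker bit (which
         * avoids long runs of zero bits in the stream); its absence is
         * treated as an error below when strict error recognition is set. */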
        if (code > 8) {
            if (get_bits1(&s->gb) == 0) { /* marker */
                if (s->avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT)) {
                    av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n");
                    return -1;
                }
            }
        }
    }

    return ff_mpeg4_pred_dc(s, n, level, dir_ptr, 0);
}

/**
 * Decode first partition.
 * @return number of MBs decoded or <0 if an error occurred
 */
static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
{
    MpegEncContext *s = &ctx->m;
    int mb_num = 0;
    static const int8_t quant_tab[4] = { -1, -2, 1, 2 };

    /* decode first partition */
    s->first_slice_line = 1;
    for (; s->mb_y < s->mb_height; s->mb_y++) {
        ff_init_block_index(s);
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride;
            int cbpc;
            int dir = 0;

            mb_num++;
            ff_update_block_index(s);
            if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
                s->first_slice_line = 0;

            if (s->pict_type == AV_PICTURE_TYPE_I) {
                int i;

                do {
                    if (show_bits_long(&s->gb, 19) == DC_MARKER)
                        return mb_num - 1;

                    cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
                    if (cbpc < 0) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
                        return -1;
                    }
                } while (cbpc == 8);

                s->cbp_table[xy] = cbpc & 3;
                s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
                s->mb_intra = 1;

                if (cbpc & 4)
                    ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);

                s->current_picture.qscale_table[xy] = s->qscale;

                s->mbintra_table[xy] = 1;
                for (i = 0; i < 6; i++) {
                    int dc_pred_dir;
                    int dc = mpeg4_decode_dc(s, i, &dc_pred_dir);
                    if (dc < 0) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
                        return -1;
                    }
                    dir <<= 1;
                    if (dc_pred_dir)
                        dir |= 1;
                }
                s->pred_dir_table[xy] = dir;
            } else { /* P/S_TYPE */
                int mx, my, pred_x, pred_y, bits;
                int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]];
                const int stride = s->b8_stride * 2;

try_again:
                bits = show_bits(&s->gb, 17);
                if (bits == MOTION_MARKER)
                    return mb_num - 1;

                skip_bits1(&s->gb);
                if (bits & 0x10000) {
                    /* skip mb */
                    if (s->pict_type == AV_PICTURE_TYPE_S &&
                        ctx->vol_sprite_usage == GMC_SPRITE) {
                        s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
                                                         MB_TYPE_16x16 |
                                                         MB_TYPE_GMC |
                                                         MB_TYPE_L0;
                        mx = get_amv(ctx, 0);
                        my = get_amv(ctx, 1);
                    } else {
                        s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
                                                         MB_TYPE_16x16 |
                                                         MB_TYPE_L0;
                        mx = my = 0;
                    }
                    mot_val[0] =
                    mot_val[2] =
                    mot_val[0 + stride] =
                    mot_val[2 + stride] = mx;
                    mot_val[1] =
                    mot_val[3] =
                    mot_val[1 + stride] =
                    mot_val[3 + stride] = my;

                    if (s->mbintra_table[xy])
                        ff_clean_intra_table_entries(s);
                    continue;
                }

                cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
                if (cbpc < 0) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y);
                    return -1;
                }
                if (cbpc == 20)
                    goto try_again;

                s->cbp_table[xy] = cbpc & (8 + 3);  // 8 is dquant

                s->mb_intra = ((cbpc & 4) != 0);

                if (s->mb_intra) {
                    s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
                    s->mbintra_table[xy] = 1;
                    mot_val[0] =
                    mot_val[2] =
                    mot_val[0 + stride] =
                    mot_val[2 + stride] = 0;
                    mot_val[1] =
                    mot_val[3] =
                    mot_val[1 + stride] =
                    mot_val[3 + stride] = 0;
                } else {
                    if (s->mbintra_table[xy])
                        ff_clean_intra_table_entries(s);

                    if (s->pict_type == AV_PICTURE_TYPE_S &&
                        ctx->vol_sprite_usage == GMC_SPRITE &&
                        (cbpc & 16) == 0)
                        s->mcsel = get_bits1(&s->gb);
                    else
                        s->mcsel = 0;

                    if ((cbpc & 16) == 0) {
                        /* 16x16 motion prediction */
                        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                        if (!s->mcsel) {
                            mx = ff_h263_decode_motion(s, pred_x, s->f_code);
                            if (mx >= 0xffff)
                                return -1;

                            my = ff_h263_decode_motion(s, pred_y, s->f_code);
                            if (my >= 0xffff)
                                return -1;
                            s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
                                                             MB_TYPE_L0;
                        } else {
                            mx = get_amv(ctx, 0);
                            my = get_amv(ctx, 1);
                            s->current_picture.mb_type[xy] = MB_TYPE_16x16 |
                                                             MB_TYPE_GMC |
                                                             MB_TYPE_L0;
                        }

                        mot_val[0] =
                        mot_val[2] =
                        mot_val[0 + stride] =
                        mot_val[2 + stride] = mx;
                        mot_val[1] =
                        mot_val[3] =
                        mot_val[1 + stride] =
                        mot_val[3 + stride] = my;
                    } else {
                        int i;
                        s->current_picture.mb_type[xy] = MB_TYPE_8x8 |
                                                         MB_TYPE_L0;
                        for (i = 0; i < 4; i++) {
                            int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                            mx = ff_h263_decode_motion(s, pred_x, s->f_code);
                            if (mx >= 0xffff)
                                return -1;

                            my = ff_h263_decode_motion(s, pred_y, s->f_code);
                            if (my >= 0xffff)
                                return -1;
                            mot_val[0] = mx;
                            mot_val[1] = my;
                        }
                    }
                }
            }
        }
        s->mb_x = 0;
    }

    return mb_num;
}

/**
 * Decode second partition.
 * @return <0 if an error occurred
 */
static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
{
    int mb_num = 0;
    static const int8_t quant_tab[4] = { -1, -2, 1, 2 };

    s->mb_x = s->resync_mb_x;
    s->first_slice_line = 1;
    for (s->mb_y = s->resync_mb_y; mb_num < mb_count; s->mb_y++) {
        ff_init_block_index(s);
        for (; mb_num < mb_count && s->mb_x < s->mb_width; s->mb_x++) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride;

            mb_num++;
            ff_update_block_index(s);
            if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1)
                s->first_slice_line = 0;

            if (s->pict_type == AV_PICTURE_TYPE_I) {
                int ac_pred = get_bits1(&s->gb);
                int cbpy    = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
                if (cbpy < 0) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
                    return -1;
                }

                s->cbp_table[xy] |= cbpy << 2;
                s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
            } else { /* P || S_TYPE */
                if (IS_INTRA(s->current_picture.mb_type[xy])) {
                    int i;
                    int dir     = 0;
                    int ac_pred = get_bits1(&s->gb);
                    int cbpy    = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
                    if (cbpy < 0) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
                        return -1;
                    }

                    if (s->cbp_table[xy] & 8)
                        ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
                    s->current_picture.qscale_table[xy] = s->qscale;

                    for (i = 0; i < 6; i++) {
                        int dc_pred_dir;
                        int dc = mpeg4_decode_dc(s, i, &dc_pred_dir);
                        if (dc < 0) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "DC corrupted at %d %d\n", s->mb_x, s->mb_y);
                            return -1;
                        }
                        dir <<= 1;
                        if (dc_pred_dir)
                            dir |= 1;
                    }
                    s->cbp_table[xy] &= 3;  // remove dquant
                    s->cbp_table[xy] |= cbpy << 2;
                    s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED;
                    s->pred_dir_table[xy] = dir;
                } else if (IS_SKIP(s->current_picture.mb_type[xy])) {
                    s->current_picture.qscale_table[xy] = s->qscale;
                    s->cbp_table[xy] = 0;
                } else {
                    int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);

                    if (cbpy < 0) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y);
                        return -1;
                    }

                    if (s->cbp_table[xy] & 8)
                        ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
                    s->current_picture.qscale_table[xy] = s->qscale;

                    s->cbp_table[xy] &= 3;  // remove dquant
                    s->cbp_table[xy] |= (cbpy ^ 0xf) << 2;
                }
            }
        }
        if (mb_num >= mb_count)
            return 0;
        s->mb_x = 0;
    }
    return 0;
}

/**
 * Decode the first and second partition.
 * @return <0 if error (and sets error type in the error_status_table)
 */
int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
{
    MpegEncContext *s = &ctx->m;
    int mb_num;
    const int part_a_error = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR;
    const int part_a_end   = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_END | ER_MV_END) : ER_MV_END;
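
    /* Partition A carries the per-MB headers: MB types plus DC values for
     * I-VOPs, or MB types plus motion vectors for P/S-VOPs. It is terminated
     * by DC_MARKER or MOTION_MARKER respectively, after which partition B
     * carries ac_pred flags, CBPY and (for intra MBs of P/S-VOPs) the DC
     * values. The texture data follows and is decoded per macroblock in
     * mpeg4_decode_partitioned_mb(). */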
    mb_num = mpeg4_decode_partition_a(ctx);
    if (mb_num <= 0) {
        ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                        s->mb_x, s->mb_y, part_a_error);
        return -1;
    }

    if (s->resync_mb_x + s->resync_mb_y * s->mb_width + mb_num > s->mb_num) {
        av_log(s->avctx, AV_LOG_ERROR, "slice below monitor ...\n");
        ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                        s->mb_x, s->mb_y, part_a_error);
        return -1;
    }

    s->mb_num_left = mb_num;

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        while (show_bits(&s->gb, 9) == 1)
            skip_bits(&s->gb, 9);
        if (get_bits_long(&s->gb, 19) != DC_MARKER) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "marker missing after first I partition at %d %d\n",
                   s->mb_x, s->mb_y);
            return -1;
        }
    } else {
        while (show_bits(&s->gb, 10) == 1)
            skip_bits(&s->gb, 10);
        if (get_bits(&s->gb, 17) != MOTION_MARKER) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "marker missing after first P partition at %d %d\n",
                   s->mb_x, s->mb_y);
            return -1;
        }
    }
    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                    s->mb_x - 1, s->mb_y, part_a_end);

    if (mpeg4_decode_partition_b(s, mb_num) < 0) {
        if (s->pict_type == AV_PICTURE_TYPE_P)
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                            s->mb_x, s->mb_y, ER_DC_ERROR);
        return -1;
    } else {
        if (s->pict_type == AV_PICTURE_TYPE_P)
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                            s->mb_x - 1, s->mb_y, ER_DC_END);
    }

    return 0;
}

/**
 * Decode a block.
 * @return <0 if an error occurred
 */
static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
                                     int n, int coded, int intra, int rvlc)
{
    MpegEncContext *s = &ctx->m;
    int level, i, last, run, qmul, qadd;
    int av_uninit(dc_pred_dir);
    RLTable *rl;
    RL_VLC_ELEM *rl_vlc;
    const uint8_t *scan_table;

    // Note intra & rvlc should be optimized away if this is inlined
    if (intra) {
        if (ctx->use_intra_dc_vlc) {
            /* DC coef */
            if (s->partitioned_frame) {
                level = s->dc_val[0][s->block_index[n]];
                if (n < 4)
                    level = FASTDIV((level + (s->y_dc_scale >> 1)), s->y_dc_scale);
                else
                    level = FASTDIV((level + (s->c_dc_scale >> 1)), s->c_dc_scale);
                dc_pred_dir = (s->pred_dir_table[s->mb_x + s->mb_y * s->mb_stride] << n) & 32;
            } else {
                level = mpeg4_decode_dc(s, n, &dc_pred_dir);
                if (level < 0)
                    return -1;
            }
            block[0] = level;
            i        = 0;
        } else {
            i = -1;
            ff_mpeg4_pred_dc(s, n, 0, &dc_pred_dir, 0);
        }
        if (!coded)
            goto not_coded;

        if (rvlc) {
            rl     = &ff_rvlc_rl_intra;
            rl_vlc = ff_rvlc_rl_intra.rl_vlc[0];
        } else {
            rl     = &ff_mpeg4_rl_intra;
            rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0];
        }
        if (s->ac_pred) {
            if (dc_pred_dir == 0)
                scan_table = s->intra_v_scantable.permutated;  /* left */
            else
                scan_table = s->intra_h_scantable.permutated;  /* top */
        } else {
            scan_table = s->intra_scantable.permutated;
        }
        qmul = 1;
        qadd = 0;
    } else {
        i = -1;
        if (!coded) {
            s->block_last_index[n] = i;
            return 0;
        }
        if (rvlc)
            rl = &ff_rvlc_rl_inter;
        else
            rl = &ff_h263_rl_inter;

        scan_table = s->intra_scantable.permutated;

        if (s->mpeg_quant) {
            qmul = 1;
            qadd = 0;
            if (rvlc)
                rl_vlc = ff_rvlc_rl_inter.rl_vlc[0];
            else
                rl_vlc = ff_h263_rl_inter.rl_vlc[0];
        } else {
            qmul = s->qscale << 1;
            qadd = (s->qscale - 1) | 1;
            if (rvlc)
                rl_vlc = ff_rvlc_rl_inter.rl_vlc[s->qscale];
            else
                rl_vlc = ff_h263_rl_inter.rl_vlc[s->qscale];
        }
    }
    {
        OPEN_READER(re, &s->gb);
        for (;;) {
            UPDATE_CACHE(re, &s->gb);
            GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0);
            if (level == 0) {
                /* escape */
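                /* Three escape modes follow the ESC code: the "first" escape
                 * adds the table's maximum level for the decoded run to the
                 * level, the "second" adds the maximum run (+1) for the
                 * decoded level to the run, and the "third" codes last, run
                 * and level explicitly with fixed-length fields. The RVLC
                 * branch below only has the explicit form. */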
                if (rvlc) {
                    if (SHOW_UBITS(re, &s->gb, 1) == 0) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "1. marker bit missing in rvlc esc\n");
                        return -1;
                    }
                    SKIP_CACHE(re, &s->gb, 1);

                    last = SHOW_UBITS(re, &s->gb, 1);
                    SKIP_CACHE(re, &s->gb, 1);
                    run  = SHOW_UBITS(re, &s->gb, 6);
                    SKIP_COUNTER(re, &s->gb, 1 + 1 + 6);
                    UPDATE_CACHE(re, &s->gb);

                    if (SHOW_UBITS(re, &s->gb, 1) == 0) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "2. marker bit missing in rvlc esc\n");
                        return -1;
                    }
                    SKIP_CACHE(re, &s->gb, 1);

                    level = SHOW_UBITS(re, &s->gb, 11);
                    SKIP_CACHE(re, &s->gb, 11);

                    if (SHOW_UBITS(re, &s->gb, 5) != 0x10) {
                        av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n");
                        return -1;
                    }
                    SKIP_CACHE(re, &s->gb, 5);

                    level = level * qmul + qadd;
                    level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                    SKIP_COUNTER(re, &s->gb, 1 + 11 + 5 + 1);

                    i += run + 1;
                    if (last)
                        i += 192;
                } else {
                    int cache;
                    cache = GET_CACHE(re, &s->gb);

                    if (IS_3IV1)
                        cache ^= 0xC0000000;

                    if (cache & 0x80000000) {
                        if (cache & 0x40000000) {
                            /* third escape */
                            SKIP_CACHE(re, &s->gb, 2);
                            last = SHOW_UBITS(re, &s->gb, 1);
                            SKIP_CACHE(re, &s->gb, 1);
                            run  = SHOW_UBITS(re, &s->gb, 6);
                            SKIP_COUNTER(re, &s->gb, 2 + 1 + 6);
                            UPDATE_CACHE(re, &s->gb);

                            if (IS_3IV1) {
                                level = SHOW_SBITS(re, &s->gb, 12);
                                LAST_SKIP_BITS(re, &s->gb, 12);
                            } else {
                                if (SHOW_UBITS(re, &s->gb, 1) == 0) {
                                    av_log(s->avctx, AV_LOG_ERROR,
                                           "1. marker bit missing in 3. esc\n");
                                    if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR))
                                        return -1;
                                }
                                SKIP_CACHE(re, &s->gb, 1);

                                level = SHOW_SBITS(re, &s->gb, 12);
                                SKIP_CACHE(re, &s->gb, 12);

                                if (SHOW_UBITS(re, &s->gb, 1) == 0) {
                                    av_log(s->avctx, AV_LOG_ERROR,
                                           "2. marker bit missing in 3. esc\n");
                                    if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR))
                                        return -1;
                                }

                                SKIP_COUNTER(re, &s->gb, 1 + 12 + 1);
                            }

#if 0
                            if (s->error_recognition >= FF_ER_COMPLIANT) {
                                const int abs_level = FFABS(level);
                                if (abs_level <= MAX_LEVEL && run <= MAX_RUN) {
                                    const int run1 = run - rl->max_run[last][abs_level] - 1;
                                    if (abs_level <= rl->max_level[last][run]) {
                                        av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n");
                                        return -1;
                                    }
                                    if (s->error_recognition > FF_ER_COMPLIANT) {
                                        if (abs_level <= rl->max_level[last][run] * 2) {
                                            av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n");
                                            return -1;
                                        }
                                        if (run1 >= 0 && abs_level <= rl->max_level[last][run1]) {
                                            av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n");
                                            return -1;
                                        }
                                    }
                                }
                            }
#endif
                            if (level > 0)
                                level = level * qmul + qadd;
                            else
                                level = level * qmul - qadd;

                            if ((unsigned)(level + 2048) > 4095) {
                                if (s->avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) {
                                    if (level > 2560 || level < -2560) {
                                        av_log(s->avctx, AV_LOG_ERROR,
                                               "|level| overflow in 3. esc, qp=%d\n",
                                               s->qscale);
                                        return -1;
                                    }
                                }
                                level = level < 0 ? -2048 : 2047;
                            }

                            i += run + 1;
                            if (last)
                                i += 192;
                        } else {
                            /* second escape */
                            SKIP_BITS(re, &s->gb, 2);
                            GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
                            i    += run + rl->max_run[run >> 7][level / qmul] + 1;  // FIXME opt indexing
                            level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                            LAST_SKIP_BITS(re, &s->gb, 1);
                        }
                    } else {
                        /* first escape */
                        SKIP_BITS(re, &s->gb, 1);
                        GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1);
                        i    += run;
                        level = level + rl->max_level[run >> 7][(run - 1) & 63] * qmul;  // FIXME opt indexing
                        level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                        LAST_SKIP_BITS(re, &s->gb, 1);
                    }
                }
            } else {
                i    += run;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                LAST_SKIP_BITS(re, &s->gb, 1);
            }
            ff_tlog(s->avctx, "dct[%d][%d] = %- 4d end?:%d\n", scan_table[i&63]&7, scan_table[i&63] >> 3, level, i > 62);
            if (i > 62) {
                i -= 192;
                if (i & (~63)) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
                    return -1;
                }

                block[scan_table[i]] = level;
                break;
            }

            block[scan_table[i]] = level;
        }
        CLOSE_READER(re, &s->gb);
    }

not_coded:
    if (intra) {
        if (!ctx->use_intra_dc_vlc) {
            block[0] = ff_mpeg4_pred_dc(s, n, block[0], &dc_pred_dir, 0);

            i -= i >> 31;  // if (i == -1) i = 0;
        }

        ff_mpeg4_pred_ac(s, block, n, dc_pred_dir);
        if (s->ac_pred)
            i = 63;  // FIXME not optimal
    }
    s->block_last_index[n] = i;

    return 0;
}

/**
 * Decode partition C of one MB.
 * @return <0 if an error occurred
 */
static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
{
    Mpeg4DecContext *ctx = (Mpeg4DecContext *)s;
    int cbp, mb_type;
    const int xy = s->mb_x + s->mb_y * s->mb_stride;

    mb_type = s->current_picture.mb_type[xy];
    cbp     = s->cbp_table[xy];

    ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;

    if (s->current_picture.qscale_table[xy] != s->qscale)
        ff_set_qscale(s, s->current_picture.qscale_table[xy]);

    if (s->pict_type == AV_PICTURE_TYPE_P ||
        s->pict_type == AV_PICTURE_TYPE_S) {
        int i;
        for (i = 0; i < 4; i++) {
            s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
            s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
        }
        s->mb_intra = IS_INTRA(mb_type);

        if (IS_SKIP(mb_type)) {
            /* skip mb */
            for (i = 0; i < 6; i++)
                s->block_last_index[i] = -1;
            s->mv_dir  = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
            if (s->pict_type == AV_PICTURE_TYPE_S
                && ctx->vol_sprite_usage == GMC_SPRITE) {
                s->mcsel      = 1;
                s->mb_skipped = 0;
            } else {
                s->mcsel      = 0;
                s->mb_skipped = 1;
            }
        } else if (s->mb_intra) {
            s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]);
        } else if (!s->mb_intra) {
            // s->mcsel = 0;  // FIXME do we need to init that?

            s->mv_dir = MV_DIR_FORWARD;
            if (IS_8X8(mb_type)) {
                s->mv_type = MV_TYPE_8X8;
            } else {
                s->mv_type = MV_TYPE_16X16;
            }
        }
    } else { /* I-Frame */
        s->mb_intra = 1;
        s->ac_pred  = IS_ACPRED(s->current_picture.mb_type[xy]);
    }

    if (!IS_SKIP(mb_type)) {
        int i;
        s->bdsp.clear_blocks(s->block[0]);
        /* decode each block */
        for (i = 0; i < 6; i++) {
            if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, s->mb_intra, ctx->rvlc) < 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "texture corrupted at %d %d %d\n",
                       s->mb_x, s->mb_y, s->mb_intra);
                return -1;
            }
            cbp += cbp;
        }
    }

    /* per-MB end of slice check */
    if (--s->mb_num_left <= 0) {
        if (mpeg4_is_resync(ctx))
            return SLICE_END;
        else
            return SLICE_NOEND;
    } else {
        if (mpeg4_is_resync(ctx)) {
            const int delta = s->mb_x + 1 == s->mb_width ? 2 : 1;
            if (s->cbp_table[xy + delta])
                return SLICE_END;
        }
        return SLICE_OK;
    }
}

static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
{
    Mpeg4DecContext *ctx = (Mpeg4DecContext *)s;
    int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
    int16_t *mot_val;
    static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
    const int xy = s->mb_x + s->mb_y * s->mb_stride;

    av_assert2(s->h263_pred);

    if (s->pict_type == AV_PICTURE_TYPE_P ||
        s->pict_type == AV_PICTURE_TYPE_S) {
        do {
            if (get_bits1(&s->gb)) {
                /* skip mb */
                s->mb_intra = 0;
                for (i = 0; i < 6; i++)
                    s->block_last_index[i] = -1;
                s->mv_dir  = MV_DIR_FORWARD;
                s->mv_type = MV_TYPE_16X16;
                if (s->pict_type == AV_PICTURE_TYPE_S &&
                    ctx->vol_sprite_usage == GMC_SPRITE) {
                    s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
                                                     MB_TYPE_GMC |
                                                     MB_TYPE_16x16 |
                                                     MB_TYPE_L0;
                    s->mcsel       = 1;
                    s->mv[0][0][0] = get_amv(ctx, 0);
                    s->mv[0][0][1] = get_amv(ctx, 1);
                    s->mb_skipped  = 0;
                } else {
                    s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
                                                     MB_TYPE_16x16 |
                                                     MB_TYPE_L0;
                    s->mcsel       = 0;
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    s->mb_skipped  = 1;
                }
                goto end;
            }
            cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
            if (cbpc < 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "mcbpc damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
        } while (cbpc == 20);

        s->bdsp.clear_blocks(s->block[0]);
        dquant      = cbpc & 8;
        s->mb_intra = ((cbpc & 4) != 0);
        if (s->mb_intra)
            goto intra;

        if (s->pict_type == AV_PICTURE_TYPE_S &&
            ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0)
            s->mcsel = get_bits1(&s->gb);
        else
            s->mcsel = 0;
        cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F;
        if (cbpy < 0) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "P cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
            return AVERROR_INVALIDDATA;
        }

        cbp = (cbpc & 3) | (cbpy << 2);
        if (dquant)
            ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
        if ((!s->progressive_sequence) &&
            (cbp || (s->workaround_bugs & FF_BUG_XVID_ILACE)))
            s->interlaced_dct = get_bits1(&s->gb);

        s->mv_dir = MV_DIR_FORWARD;
        if ((cbpc & 16) == 0) {
            if (s->mcsel) {
                s->current_picture.mb_type[xy] = MB_TYPE_GMC |
                                                 MB_TYPE_16x16 |
                                                 MB_TYPE_L0;
                /* 16x16 global motion prediction */
                s->mv_type     = MV_TYPE_16X16;
                mx             = get_amv(ctx, 0);
                my             = get_amv(ctx, 1);
                s->mv[0][0][0] = mx;
                s->mv[0][0][1] = my;
            } else if ((!s->progressive_sequence) && get_bits1(&s->gb)) {
                s->current_picture.mb_type[xy] = MB_TYPE_16x8 |
                                                 MB_TYPE_L0 |
                                                 MB_TYPE_INTERLACED;
                /* 16x8 field motion prediction */
                s->mv_type = MV_TYPE_FIELD;

                s->field_select[0][0] = get_bits1(&s->gb);
                s->field_select[0][1] = get_bits1(&s->gb);

                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

                for (i = 0; i < 2; i++) {
                    mx = ff_h263_decode_motion(s, pred_x, s->f_code);
                    if (mx >= 0xffff)
                        return -1;

                    my = ff_h263_decode_motion(s, pred_y / 2, s->f_code);
                    if (my >= 0xffff)
                        return -1;

                    s->mv[0][i][0] = mx;
                    s->mv[0][i][1] = my;
                }
            } else {
                s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
                /* 16x16 motion prediction */
                s->mv_type = MV_TYPE_16X16;
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                mx = ff_h263_decode_motion(s, pred_x, s->f_code);
                if (mx >= 0xffff)
                    return -1;

                my = ff_h263_decode_motion(s, pred_y, s->f_code);
                if (my >= 0xffff)
                    return -1;
                s->mv[0][0][0] = mx;
                s->mv[0][0][1] = my;
            }
        } else {
            s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
            s->mv_type = MV_TYPE_8X8;
            for (i = 0; i < 4; i++) {
                mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                mx      = ff_h263_decode_motion(s, pred_x, s->f_code);
                if (mx >= 0xffff)
                    return -1;

                my = ff_h263_decode_motion(s, pred_y, s->f_code);
                if (my >= 0xffff)
                    return -1;
                s->mv[0][i][0] = mx;
                s->mv[0][i][1] = my;
                mot_val[0]     = mx;
                mot_val[1]     = my;
            }
        }
    } else if (s->pict_type == AV_PICTURE_TYPE_B) {
        int modb1;  // first bit of modb
        int modb2;  // second bit of modb
        int mb_type;

        s->mb_intra = 0;  // B-frames never contain intra blocks
        s->mcsel    = 0;  //      ...               true gmc blocks

        if (s->mb_x == 0) {
            for (i = 0; i < 2; i++) {
                s->last_mv[i][0][0] =
                s->last_mv[i][0][1] =
                s->last_mv[i][1][0] =
                s->last_mv[i][1][1] = 0;
            }

            ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
        }

        /* if we skipped it in the future P-frame, then skip it now too */
  1330. s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC
  1331. if (s->mb_skipped) {
  1332. /* skip mb */
  1333. for (i = 0; i < 6; i++)
  1334. s->block_last_index[i] = -1;
  1335. s->mv_dir = MV_DIR_FORWARD;
  1336. s->mv_type = MV_TYPE_16X16;
  1337. s->mv[0][0][0] =
  1338. s->mv[0][0][1] =
  1339. s->mv[1][0][0] =
  1340. s->mv[1][0][1] = 0;
  1341. s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
  1342. MB_TYPE_16x16 |
  1343. MB_TYPE_L0;
  1344. goto end;
  1345. }
  1346. modb1 = get_bits1(&s->gb);
  1347. if (modb1) {
  1348. // like MB_TYPE_B_DIRECT but no vectors coded
  1349. mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1;
  1350. cbp = 0;
  1351. } else {
  1352. modb2 = get_bits1(&s->gb);
  1353. mb_type = get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1);
  1354. if (mb_type < 0) {
  1355. av_log(s->avctx, AV_LOG_ERROR, "illegal MB_type\n");
  1356. return -1;
  1357. }
  1358. mb_type = mb_type_b_map[mb_type];
  1359. if (modb2) {
  1360. cbp = 0;
  1361. } else {
  1362. s->bdsp.clear_blocks(s->block[0]);
  1363. cbp = get_bits(&s->gb, 6);
  1364. }
  1365. if ((!IS_DIRECT(mb_type)) && cbp) {
  1366. if (get_bits1(&s->gb))
  1367. ff_set_qscale(s, s->qscale + get_bits1(&s->gb) * 4 - 2);
  1368. }
  1369. if (!s->progressive_sequence) {
  1370. if (cbp)
  1371. s->interlaced_dct = get_bits1(&s->gb);
  1372. if (!IS_DIRECT(mb_type) && get_bits1(&s->gb)) {
  1373. mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
  1374. mb_type &= ~MB_TYPE_16x16;
  1375. if (USES_LIST(mb_type, 0)) {
  1376. s->field_select[0][0] = get_bits1(&s->gb);
  1377. s->field_select[0][1] = get_bits1(&s->gb);
  1378. }
  1379. if (USES_LIST(mb_type, 1)) {
  1380. s->field_select[1][0] = get_bits1(&s->gb);
  1381. s->field_select[1][1] = get_bits1(&s->gb);
  1382. }
  1383. }
  1384. }
  1385. s->mv_dir = 0;
  1386. if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) {
  1387. s->mv_type = MV_TYPE_16X16;
  1388. if (USES_LIST(mb_type, 0)) {
  1389. s->mv_dir = MV_DIR_FORWARD;
  1390. mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
  1391. my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
  1392. s->last_mv[0][1][0] =
  1393. s->last_mv[0][0][0] =
  1394. s->mv[0][0][0] = mx;
  1395. s->last_mv[0][1][1] =
  1396. s->last_mv[0][0][1] =
  1397. s->mv[0][0][1] = my;
  1398. }
  1399. if (USES_LIST(mb_type, 1)) {
  1400. s->mv_dir |= MV_DIR_BACKWARD;
  1401. mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
  1402. my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
  1403. s->last_mv[1][1][0] =
  1404. s->last_mv[1][0][0] =
  1405. s->mv[1][0][0] = mx;
  1406. s->last_mv[1][1][1] =
  1407. s->last_mv[1][0][1] =
  1408. s->mv[1][0][1] = my;
  1409. }
  1410. } else if (!IS_DIRECT(mb_type)) {
  1411. s->mv_type = MV_TYPE_FIELD;
  1412. if (USES_LIST(mb_type, 0)) {
  1413. s->mv_dir = MV_DIR_FORWARD;
  1414. for (i = 0; i < 2; i++) {
  1415. mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], s->f_code);
  1416. my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, s->f_code);
  1417. s->last_mv[0][i][0] =
  1418. s->mv[0][i][0] = mx;
  1419. s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2;
  1420. }
  1421. }
  1422. if (USES_LIST(mb_type, 1)) {
  1423. s->mv_dir |= MV_DIR_BACKWARD;
  1424. for (i = 0; i < 2; i++) {
  1425. mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], s->b_code);
  1426. my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, s->b_code);
  1427. s->last_mv[1][i][0] =
  1428. s->mv[1][i][0] = mx;
  1429. s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2;
  1430. }
  1431. }
  1432. }
  1433. }
  1434. if (IS_DIRECT(mb_type)) {
  1435. if (IS_SKIP(mb_type)) {
  1436. mx =
  1437. my = 0;
  1438. } else {
  1439. mx = ff_h263_decode_motion(s, 0, 1);
  1440. my = ff_h263_decode_motion(s, 0, 1);
  1441. }
  1442. s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
  1443. mb_type |= ff_mpeg4_set_direct_mv(s, mx, my);
  1444. }
  1445. s->current_picture.mb_type[xy] = mb_type;
  1446. } else { /* I-Frame */
  1447. do {
  1448. cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
  1449. if (cbpc < 0) {
  1450. av_log(s->avctx, AV_LOG_ERROR,
  1451. "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
  1452. return -1;
  1453. }
  1454. } while (cbpc == 8);
  1455. dquant = cbpc & 4;
  1456. s->mb_intra = 1;
  1457. intra:
  1458. s->ac_pred = get_bits1(&s->gb);
  1459. if (s->ac_pred)
  1460. s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
  1461. else
  1462. s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
  1463. cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
  1464. if (cbpy < 0) {
  1465. av_log(s->avctx, AV_LOG_ERROR,
  1466. "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
  1467. return -1;
  1468. }
  1469. cbp = (cbpc & 3) | (cbpy << 2);
  1470. ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold;
  1471. if (dquant)
  1472. ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]);
  1473. if (!s->progressive_sequence)
  1474. s->interlaced_dct = get_bits1(&s->gb);
  1475. s->bdsp.clear_blocks(s->block[0]);
  1476. /* decode each block */
  1477. for (i = 0; i < 6; i++) {
  1478. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 1, 0) < 0)
  1479. return -1;
  1480. cbp += cbp;
  1481. }
  1482. goto end;
  1483. }
  1484. /* decode each block */
  1485. for (i = 0; i < 6; i++) {
  1486. if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 0, 0) < 0)
  1487. return -1;
  1488. cbp += cbp;
  1489. }
  1490. end:
  1491. /* per-MB end of slice check */
  1492. if (s->codec_id == AV_CODEC_ID_MPEG4) {
  1493. int next = mpeg4_is_resync(ctx);
  1494. if (next) {
  1495. if (s->mb_x + s->mb_y*s->mb_width + 1 > next && (s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
  1496. return -1;
  1497. } else if (s->mb_x + s->mb_y*s->mb_width + 1 >= next)
  1498. return SLICE_END;
  1499. if (s->pict_type == AV_PICTURE_TYPE_B) {
  1500. const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
  1501. ff_thread_await_progress(&s->next_picture_ptr->tf,
  1502. (s->mb_x + delta >= s->mb_width)
  1503. ? FFMIN(s->mb_y + 1, s->mb_height - 1)
  1504. : s->mb_y, 0);
  1505. if (s->next_picture.mbskip_table[xy + delta])
  1506. return SLICE_OK;
  1507. }
  1508. return SLICE_END;
  1509. }
  1510. }
  1511. return SLICE_OK;
  1512. }
  1513. static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb)
  1514. {
  1515. int hours, minutes, seconds;
  1516. if (!show_bits(gb, 23)) {
  1517. av_log(s->avctx, AV_LOG_WARNING, "GOP header invalid\n");
  1518. return -1;
  1519. }
  1520. hours = get_bits(gb, 5);
  1521. minutes = get_bits(gb, 6);
  1522. check_marker(s->avctx, gb, "in gop_header");
  1523. seconds = get_bits(gb, 6);
  1524. s->time_base = seconds + 60*(minutes + 60*hours);
  1525. skip_bits1(gb);
  1526. skip_bits1(gb);
  1527. return 0;
  1528. }
  1529. static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb)
  1530. {
  1531. s->avctx->profile = get_bits(gb, 4);
  1532. s->avctx->level = get_bits(gb, 4);
  1533. // for Simple profile, level 0
  1534. if (s->avctx->profile == 0 && s->avctx->level == 8) {
  1535. s->avctx->level = 0;
  1536. }
  1537. return 0;
  1538. }
  1539. static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  1540. {
  1541. MpegEncContext *s = &ctx->m;
  1542. int width, height, vo_ver_id;
  1543. /* vol header */
  1544. skip_bits(gb, 1); /* random access */
  1545. s->vo_type = get_bits(gb, 8);
  1546. if (get_bits1(gb) != 0) { /* is_ol_id */
  1547. vo_ver_id = get_bits(gb, 4); /* vo_ver_id */
  1548. skip_bits(gb, 3); /* vo_priority */
  1549. } else {
  1550. vo_ver_id = 1;
  1551. }
  1552. s->aspect_ratio_info = get_bits(gb, 4);
  1553. if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
  1554. s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width
  1555. s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height
  1556. } else {
  1557. s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
  1558. }
  1559. if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
  1560. int chroma_format = get_bits(gb, 2);
  1561. if (chroma_format != CHROMA_420)
  1562. av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");
  1563. s->low_delay = get_bits1(gb);
  1564. if (get_bits1(gb)) { /* vbv parameters */
  1565. get_bits(gb, 15); /* first_half_bitrate */
  1566. check_marker(s->avctx, gb, "after first_half_bitrate");
  1567. get_bits(gb, 15); /* latter_half_bitrate */
  1568. check_marker(s->avctx, gb, "after latter_half_bitrate");
  1569. get_bits(gb, 15); /* first_half_vbv_buffer_size */
  1570. check_marker(s->avctx, gb, "after first_half_vbv_buffer_size");
  1571. get_bits(gb, 3); /* latter_half_vbv_buffer_size */
  1572. get_bits(gb, 11); /* first_half_vbv_occupancy */
  1573. check_marker(s->avctx, gb, "after first_half_vbv_occupancy");
  1574. get_bits(gb, 15); /* latter_half_vbv_occupancy */
  1575. check_marker(s->avctx, gb, "after latter_half_vbv_occupancy");
  1576. }
  1577. } else {
1578. /* Is setting the low_delay flag only once (for the first picture) the smartest thing to do?
1579. * Later low_delay detection will not override it. */
  1580. if (s->picture_number == 0) {
  1581. switch(s->vo_type) {
  1582. case SIMPLE_VO_TYPE:
  1583. case ADV_SIMPLE_VO_TYPE:
  1584. s->low_delay = 1;
  1585. break;
  1586. default:
  1587. s->low_delay = 0;
  1588. }
  1589. }
  1590. }
  1591. ctx->shape = get_bits(gb, 2); /* vol shape */
  1592. if (ctx->shape != RECT_SHAPE)
  1593. av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n");
  1594. if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) {
  1595. av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n");
  1596. skip_bits(gb, 4); /* video_object_layer_shape_extension */
  1597. }
  1598. check_marker(s->avctx, gb, "before time_increment_resolution");
  1599. s->avctx->framerate.num = get_bits(gb, 16);
  1600. if (!s->avctx->framerate.num) {
  1601. av_log(s->avctx, AV_LOG_ERROR, "framerate==0\n");
  1602. return AVERROR_INVALIDDATA;
  1603. }
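/* vop_time_increment_resolution (stored in framerate.num) is the number of
* time ticks per second; time_increment_bits is the minimum number of bits
* needed to code values in [0, resolution - 1], i.e. av_log2(resolution - 1) + 1.
* E.g. a resolution of 30000 needs 15 bits. */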
  1604. ctx->time_increment_bits = av_log2(s->avctx->framerate.num - 1) + 1;
  1605. if (ctx->time_increment_bits < 1)
  1606. ctx->time_increment_bits = 1;
  1607. check_marker(s->avctx, gb, "before fixed_vop_rate");
  1608. if (get_bits1(gb) != 0) /* fixed_vop_rate */
  1609. s->avctx->framerate.den = get_bits(gb, ctx->time_increment_bits);
  1610. else
  1611. s->avctx->framerate.den = 1;
  1612. s->avctx->time_base = av_inv_q(av_mul_q(s->avctx->framerate, (AVRational){s->avctx->ticks_per_frame, 1}));
  1613. ctx->t_frame = 0;
  1614. if (ctx->shape != BIN_ONLY_SHAPE) {
  1615. if (ctx->shape == RECT_SHAPE) {
  1616. check_marker(s->avctx, gb, "before width");
  1617. width = get_bits(gb, 13);
  1618. check_marker(s->avctx, gb, "before height");
  1619. height = get_bits(gb, 13);
  1620. check_marker(s->avctx, gb, "after height");
  1621. if (width && height && /* they should be non zero but who knows */
  1622. !(s->width && s->codec_tag == AV_RL32("MP4S"))) {
  1623. if (s->width && s->height &&
  1624. (s->width != width || s->height != height))
  1625. s->context_reinit = 1;
  1626. s->width = width;
  1627. s->height = height;
  1628. }
  1629. }
  1630. s->progressive_sequence =
  1631. s->progressive_frame = get_bits1(gb) ^ 1;
  1632. s->interlaced_dct = 0;
  1633. if (!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO))
  1634. av_log(s->avctx, AV_LOG_INFO, /* OBMC Disable */
  1635. "MPEG-4 OBMC not supported (very likely buggy encoder)\n");
  1636. if (vo_ver_id == 1)
  1637. ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */
  1638. else
  1639. ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */
  1640. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  1641. av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n");
  1642. if (ctx->vol_sprite_usage == STATIC_SPRITE ||
  1643. ctx->vol_sprite_usage == GMC_SPRITE) {
  1644. if (ctx->vol_sprite_usage == STATIC_SPRITE) {
  1645. skip_bits(gb, 13); // sprite_width
  1646. check_marker(s->avctx, gb, "after sprite_width");
  1647. skip_bits(gb, 13); // sprite_height
  1648. check_marker(s->avctx, gb, "after sprite_height");
  1649. skip_bits(gb, 13); // sprite_left
  1650. check_marker(s->avctx, gb, "after sprite_left");
  1651. skip_bits(gb, 13); // sprite_top
  1652. check_marker(s->avctx, gb, "after sprite_top");
  1653. }
  1654. ctx->num_sprite_warping_points = get_bits(gb, 6);
  1655. if (ctx->num_sprite_warping_points > 3) {
  1656. av_log(s->avctx, AV_LOG_ERROR,
  1657. "%d sprite_warping_points\n",
  1658. ctx->num_sprite_warping_points);
  1659. ctx->num_sprite_warping_points = 0;
  1660. return AVERROR_INVALIDDATA;
  1661. }
  1662. s->sprite_warping_accuracy = get_bits(gb, 2);
  1663. ctx->sprite_brightness_change = get_bits1(gb);
  1664. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  1665. skip_bits1(gb); // low_latency_sprite
  1666. }
  1667. // FIXME sadct disable bit if verid!=1 && shape not rect
  1668. if (get_bits1(gb) == 1) { /* not_8_bit */
  1669. s->quant_precision = get_bits(gb, 4); /* quant_precision */
  1670. if (get_bits(gb, 4) != 8) /* bits_per_pixel */
  1671. av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n");
  1672. if (s->quant_precision != 5)
  1673. av_log(s->avctx, AV_LOG_ERROR,
  1674. "quant precision %d\n", s->quant_precision);
  1675. if (s->quant_precision<3 || s->quant_precision>9) {
  1676. s->quant_precision = 5;
  1677. }
  1678. } else {
  1679. s->quant_precision = 5;
  1680. }
  1681. // FIXME a bunch of grayscale shape things
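/* vol_quant_type selects MPEG-style (rather than H.263-style) quantization.
* The default intra/inter matrices are loaded first; optional custom matrices
* follow, transmitted in zigzag order as 8-bit values, terminated by a zero,
* with the last transmitted value replicated over the remaining coefficients. */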
  1682. if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */
  1683. int i, v;
1684. /* load the default quantizer matrices */
  1685. for (i = 0; i < 64; i++) {
  1686. int j = s->idsp.idct_permutation[i];
  1687. v = ff_mpeg4_default_intra_matrix[i];
  1688. s->intra_matrix[j] = v;
  1689. s->chroma_intra_matrix[j] = v;
  1690. v = ff_mpeg4_default_non_intra_matrix[i];
  1691. s->inter_matrix[j] = v;
  1692. s->chroma_inter_matrix[j] = v;
  1693. }
  1694. /* load custom intra matrix */
  1695. if (get_bits1(gb)) {
  1696. int last = 0;
  1697. for (i = 0; i < 64; i++) {
  1698. int j;
  1699. if (get_bits_left(gb) < 8) {
  1700. av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n");
  1701. return AVERROR_INVALIDDATA;
  1702. }
  1703. v = get_bits(gb, 8);
  1704. if (v == 0)
  1705. break;
  1706. last = v;
  1707. j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1708. s->intra_matrix[j] = last;
  1709. s->chroma_intra_matrix[j] = last;
  1710. }
  1711. /* replicate last value */
  1712. for (; i < 64; i++) {
  1713. int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1714. s->intra_matrix[j] = last;
  1715. s->chroma_intra_matrix[j] = last;
  1716. }
  1717. }
  1718. /* load custom non intra matrix */
  1719. if (get_bits1(gb)) {
  1720. int last = 0;
  1721. for (i = 0; i < 64; i++) {
  1722. int j;
  1723. if (get_bits_left(gb) < 8) {
  1724. av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n");
  1725. return AVERROR_INVALIDDATA;
  1726. }
  1727. v = get_bits(gb, 8);
  1728. if (v == 0)
  1729. break;
  1730. last = v;
  1731. j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1732. s->inter_matrix[j] = v;
  1733. s->chroma_inter_matrix[j] = v;
  1734. }
  1735. /* replicate last value */
  1736. for (; i < 64; i++) {
  1737. int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
  1738. s->inter_matrix[j] = last;
  1739. s->chroma_inter_matrix[j] = last;
  1740. }
  1741. }
  1742. // FIXME a bunch of grayscale shape things
  1743. }
  1744. if (vo_ver_id != 1)
  1745. s->quarter_sample = get_bits1(gb);
  1746. else
  1747. s->quarter_sample = 0;
  1748. if (get_bits_left(gb) < 4) {
  1749. av_log(s->avctx, AV_LOG_ERROR, "VOL Header truncated\n");
  1750. return AVERROR_INVALIDDATA;
  1751. }
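/* complexity_estimation_disable == 0: parse the complexity estimation header.
* The estimates themselves are not used for decoding, but the number of bits
* they will occupy in each VOP type is accumulated in
* cplx_estimation_trash_{i,p,b} so they can be skipped in the VOP headers. */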
  1752. if (!get_bits1(gb)) {
  1753. int pos = get_bits_count(gb);
  1754. int estimation_method = get_bits(gb, 2);
  1755. if (estimation_method < 2) {
  1756. if (!get_bits1(gb)) {
  1757. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */
  1758. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */
  1759. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */
  1760. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */
  1761. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */
  1762. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upsampling */
  1763. }
  1764. if (!get_bits1(gb)) {
  1765. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */
  1766. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */
  1767. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */
  1768. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */
  1769. }
  1770. if (!check_marker(s->avctx, gb, "in complexity estimation part 1")) {
  1771. skip_bits_long(gb, pos - get_bits_count(gb));
  1772. goto no_cplx_est;
  1773. }
  1774. if (!get_bits1(gb)) {
  1775. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */
  1776. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */
  1777. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */
  1778. ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */
  1779. }
  1780. if (!get_bits1(gb)) {
  1781. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */
  1782. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */
  1783. ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */
  1784. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */
  1785. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */
  1786. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */
  1787. }
  1788. if (!check_marker(s->avctx, gb, "in complexity estimation part 2")) {
  1789. skip_bits_long(gb, pos - get_bits_count(gb));
  1790. goto no_cplx_est;
  1791. }
  1792. if (estimation_method == 1) {
  1793. ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */
  1794. ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */
  1795. }
  1796. } else
  1797. av_log(s->avctx, AV_LOG_ERROR,
  1798. "Invalid Complexity estimation method %d\n",
  1799. estimation_method);
  1800. } else {
  1801. no_cplx_est:
  1802. ctx->cplx_estimation_trash_i =
  1803. ctx->cplx_estimation_trash_p =
  1804. ctx->cplx_estimation_trash_b = 0;
  1805. }
  1806. ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */
  1807. s->data_partitioning = get_bits1(gb);
  1808. if (s->data_partitioning)
  1809. ctx->rvlc = get_bits1(gb);
  1810. if (vo_ver_id != 1) {
  1811. ctx->new_pred = get_bits1(gb);
  1812. if (ctx->new_pred) {
  1813. av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n");
  1814. skip_bits(gb, 2); /* requested upstream message type */
  1815. skip_bits1(gb); /* newpred segment type */
  1816. }
  1817. if (get_bits1(gb)) // reduced_res_vop
  1818. av_log(s->avctx, AV_LOG_ERROR,
  1819. "reduced resolution VOP not supported\n");
  1820. } else {
  1821. ctx->new_pred = 0;
  1822. }
  1823. ctx->scalability = get_bits1(gb);
  1824. if (ctx->scalability) {
  1825. GetBitContext bak = *gb;
  1826. int h_sampling_factor_n;
  1827. int h_sampling_factor_m;
  1828. int v_sampling_factor_n;
  1829. int v_sampling_factor_m;
  1830. skip_bits1(gb); // hierarchy_type
  1831. skip_bits(gb, 4); /* ref_layer_id */
  1832. skip_bits1(gb); /* ref_layer_sampling_dir */
  1833. h_sampling_factor_n = get_bits(gb, 5);
  1834. h_sampling_factor_m = get_bits(gb, 5);
  1835. v_sampling_factor_n = get_bits(gb, 5);
  1836. v_sampling_factor_m = get_bits(gb, 5);
  1837. ctx->enhancement_type = get_bits1(gb);
  1838. if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
  1839. v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
1840. /* illegal scalability header (VERY broken encoder),
1841. * trying to work around it */
  1842. ctx->scalability = 0;
  1843. *gb = bak;
  1844. } else
  1845. av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n");
  1846. // bin shape stuff FIXME
  1847. }
  1848. }
  1849. if (s->avctx->debug&FF_DEBUG_PICT_INFO) {
  1850. av_log(s->avctx, AV_LOG_DEBUG, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d %s%s%s%s\n",
  1851. s->avctx->framerate.den, s->avctx->framerate.num,
  1852. ctx->time_increment_bits,
  1853. s->quant_precision,
  1854. s->progressive_sequence,
  1855. s->low_delay,
  1856. ctx->scalability ? "scalability " :"" , s->quarter_sample ? "qpel " : "",
  1857. s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : ""
  1858. );
  1859. }
  1860. return 0;
  1861. }
  1862. /**
1863. * Decode the user data in the header.
1864. * Also initializes the divx/xvid/lavc version and build fields.
  1865. */
  1866. static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
  1867. {
  1868. MpegEncContext *s = &ctx->m;
  1869. char buf[256];
  1870. int i;
  1871. int e;
  1872. int ver = 0, build = 0, ver2 = 0, ver3 = 0;
  1873. char last;
  1874. for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) {
  1875. if (show_bits(gb, 23) == 0)
  1876. break;
  1877. buf[i] = get_bits(gb, 8);
  1878. }
  1879. buf[i] = 0;
  1880. /* divx detection */
  1881. e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
  1882. if (e < 2)
  1883. e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
  1884. if (e >= 2) {
  1885. ctx->divx_version = ver;
  1886. ctx->divx_build = build;
  1887. s->divx_packed = e == 3 && last == 'p';
  1888. }
  1889. /* libavcodec detection */
  1890. e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
  1891. if (e != 4)
  1892. e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
  1893. if (e != 4) {
  1894. e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1;
  1895. if (e > 1)
  1896. build = (ver << 16) + (ver2 << 8) + ver3;
  1897. }
  1898. if (e != 4) {
  1899. if (strcmp(buf, "ffmpeg") == 0)
  1900. ctx->lavc_build = 4600;
  1901. }
  1902. if (e == 4)
  1903. ctx->lavc_build = build;
  1904. /* Xvid detection */
  1905. e = sscanf(buf, "XviD%d", &build);
  1906. if (e == 1)
  1907. ctx->xvid_build = build;
  1908. return 0;
  1909. }
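/* Translate the encoder detected from user data and codec tags into
* workaround flags. Returns 1 if the IDCT was switched to the Xvid IDCT
* (ff_mpv_idct_init() was re-run), 0 otherwise. */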
  1910. int ff_mpeg4_workaround_bugs(AVCodecContext *avctx)
  1911. {
  1912. Mpeg4DecContext *ctx = avctx->priv_data;
  1913. MpegEncContext *s = &ctx->m;
  1914. if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) {
  1915. if (s->codec_tag == AV_RL32("XVID") ||
  1916. s->codec_tag == AV_RL32("XVIX") ||
  1917. s->codec_tag == AV_RL32("RMP4") ||
  1918. s->codec_tag == AV_RL32("ZMP4") ||
  1919. s->codec_tag == AV_RL32("SIPP"))
  1920. ctx->xvid_build = 0;
  1921. }
  1922. if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1)
  1923. if (s->codec_tag == AV_RL32("DIVX") && s->vo_type == 0 &&
  1924. ctx->vol_control_parameters == 0)
  1925. ctx->divx_version = 400; // divx 4
  1926. if (ctx->xvid_build >= 0 && ctx->divx_version >= 0) {
  1927. ctx->divx_version =
  1928. ctx->divx_build = -1;
  1929. }
  1930. if (s->workaround_bugs & FF_BUG_AUTODETECT) {
  1931. if (s->codec_tag == AV_RL32("XVIX"))
  1932. s->workaround_bugs |= FF_BUG_XVID_ILACE;
  1933. if (s->codec_tag == AV_RL32("UMP4"))
  1934. s->workaround_bugs |= FF_BUG_UMP4;
  1935. if (ctx->divx_version >= 500 && ctx->divx_build < 1814)
  1936. s->workaround_bugs |= FF_BUG_QPEL_CHROMA;
  1937. if (ctx->divx_version > 502 && ctx->divx_build < 1814)
  1938. s->workaround_bugs |= FF_BUG_QPEL_CHROMA2;
  1939. if (ctx->xvid_build <= 3U)
  1940. s->padding_bug_score = 256 * 256 * 256 * 64;
  1941. if (ctx->xvid_build <= 1U)
  1942. s->workaround_bugs |= FF_BUG_QPEL_CHROMA;
  1943. if (ctx->xvid_build <= 12U)
  1944. s->workaround_bugs |= FF_BUG_EDGE;
  1945. if (ctx->xvid_build <= 32U)
  1946. s->workaround_bugs |= FF_BUG_DC_CLIP;
  1947. #define SET_QPEL_FUNC(postfix1, postfix2) \
  1948. s->qdsp.put_ ## postfix1 = ff_put_ ## postfix2; \
  1949. s->qdsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2; \
  1950. s->qdsp.avg_ ## postfix1 = ff_avg_ ## postfix2;
  1951. if (ctx->lavc_build < 4653U)
  1952. s->workaround_bugs |= FF_BUG_STD_QPEL;
  1953. if (ctx->lavc_build < 4655U)
  1954. s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;
  1955. if (ctx->lavc_build < 4670U)
  1956. s->workaround_bugs |= FF_BUG_EDGE;
  1957. if (ctx->lavc_build <= 4712U)
  1958. s->workaround_bugs |= FF_BUG_DC_CLIP;
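/* Lavc user data packs the version as (major << 16) | (minor << 8) | micro
* (see decode_user_data() above); a micro version of 100 or more identifies
* FFmpeg builds as opposed to Libav ones. */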
  1959. if ((ctx->lavc_build&0xFF) >= 100) {
  1960. if (ctx->lavc_build > 3621476 && ctx->lavc_build < 3752552 &&
  1961. (ctx->lavc_build < 3752037 || ctx->lavc_build > 3752191) // 3.2.1+
  1962. )
  1963. s->workaround_bugs |= FF_BUG_IEDGE;
  1964. }
  1965. if (ctx->divx_version >= 0)
  1966. s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE;
  1967. if (ctx->divx_version == 501 && ctx->divx_build == 20020416)
  1968. s->padding_bug_score = 256 * 256 * 256 * 64;
  1969. if (ctx->divx_version < 500U)
  1970. s->workaround_bugs |= FF_BUG_EDGE;
  1971. if (ctx->divx_version >= 0)
  1972. s->workaround_bugs |= FF_BUG_HPEL_CHROMA;
  1973. }
  1974. if (s->workaround_bugs & FF_BUG_STD_QPEL) {
  1975. SET_QPEL_FUNC(qpel_pixels_tab[0][5], qpel16_mc11_old_c)
  1976. SET_QPEL_FUNC(qpel_pixels_tab[0][7], qpel16_mc31_old_c)
  1977. SET_QPEL_FUNC(qpel_pixels_tab[0][9], qpel16_mc12_old_c)
  1978. SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c)
  1979. SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c)
  1980. SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c)
  1981. SET_QPEL_FUNC(qpel_pixels_tab[1][5], qpel8_mc11_old_c)
  1982. SET_QPEL_FUNC(qpel_pixels_tab[1][7], qpel8_mc31_old_c)
  1983. SET_QPEL_FUNC(qpel_pixels_tab[1][9], qpel8_mc12_old_c)
  1984. SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c)
  1985. SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c)
  1986. SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c)
  1987. }
  1988. if (avctx->debug & FF_DEBUG_BUGS)
  1989. av_log(s->avctx, AV_LOG_DEBUG,
  1990. "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n",
  1991. s->workaround_bugs, ctx->lavc_build, ctx->xvid_build,
  1992. ctx->divx_version, ctx->divx_build, s->divx_packed ? "p" : "");
  1993. if (CONFIG_MPEG4_DECODER && ctx->xvid_build >= 0 &&
  1994. s->codec_id == AV_CODEC_ID_MPEG4 &&
  1995. avctx->idct_algo == FF_IDCT_AUTO) {
  1996. avctx->idct_algo = FF_IDCT_XVID;
  1997. ff_mpv_idct_init(s);
  1998. return 1;
  1999. }
  2000. return 0;
  2001. }
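/* Parse a VOP (picture) header: coding type, modulo_time_base and
* vop_time_increment (used to derive the pts and the direct-mode timing
* below), the vop_coded flag, rounding control, intra_dc_vlc_thr, the
* quantizer and the forward/backward f-codes. */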
  2002. static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  2003. {
  2004. MpegEncContext *s = &ctx->m;
  2005. int time_incr, time_increment;
  2006. int64_t pts;
  2007. s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */
  2008. if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay &&
  2009. ctx->vol_control_parameters == 0 && !(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)) {
  2010. av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n");
  2011. s->low_delay = 0;
  2012. }
  2013. s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;
  2014. if (s->partitioned_frame)
  2015. s->decode_mb = mpeg4_decode_partitioned_mb;
  2016. else
  2017. s->decode_mb = mpeg4_decode_mb;
  2018. time_incr = 0;
  2019. while (get_bits1(gb) != 0)
  2020. time_incr++;
  2021. check_marker(s->avctx, gb, "before time_increment");
  2022. if (ctx->time_increment_bits == 0 ||
  2023. !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) {
  2024. av_log(s->avctx, AV_LOG_WARNING,
  2025. "time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits);
  2026. for (ctx->time_increment_bits = 1;
  2027. ctx->time_increment_bits < 16;
  2028. ctx->time_increment_bits++) {
  2029. if (s->pict_type == AV_PICTURE_TYPE_P ||
  2030. (s->pict_type == AV_PICTURE_TYPE_S &&
  2031. ctx->vol_sprite_usage == GMC_SPRITE)) {
  2032. if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30)
  2033. break;
  2034. } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18)
  2035. break;
  2036. }
  2037. av_log(s->avctx, AV_LOG_WARNING,
  2038. "time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits);
  2039. if (s->avctx->framerate.num && 4*s->avctx->framerate.num < 1<<ctx->time_increment_bits) {
  2040. s->avctx->framerate.num = 1<<ctx->time_increment_bits;
  2041. s->avctx->time_base = av_inv_q(av_mul_q(s->avctx->framerate, (AVRational){s->avctx->ticks_per_frame, 1}));
  2042. }
  2043. }
  2044. if (IS_3IV1)
  2045. time_increment = get_bits1(gb); // FIXME investigate further
  2046. else
  2047. time_increment = get_bits(gb, ctx->time_increment_bits);
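/* All timestamps below are in ticks of 1/vop_time_increment_resolution.
* For reference frames, pp_time is the distance between the two most recent
* reference frames; for B-frames, pb_time is the distance from the past
* reference frame to the current VOP. Both feed the direct-mode MV scaling. */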
  2048. if (s->pict_type != AV_PICTURE_TYPE_B) {
  2049. s->last_time_base = s->time_base;
  2050. s->time_base += time_incr;
  2051. s->time = s->time_base * (int64_t)s->avctx->framerate.num + time_increment;
  2052. if (s->workaround_bugs & FF_BUG_UMP4) {
  2053. if (s->time < s->last_non_b_time) {
2054. /* header is not MPEG-4-compatible, broken encoder,
2055. * trying to work around it */
  2056. s->time_base++;
  2057. s->time += s->avctx->framerate.num;
  2058. }
  2059. }
  2060. s->pp_time = s->time - s->last_non_b_time;
  2061. s->last_non_b_time = s->time;
  2062. } else {
  2063. s->time = (s->last_time_base + time_incr) * (int64_t)s->avctx->framerate.num + time_increment;
  2064. s->pb_time = s->pp_time - (s->last_non_b_time - s->time);
  2065. if (s->pp_time <= s->pb_time ||
  2066. s->pp_time <= s->pp_time - s->pb_time ||
  2067. s->pp_time <= 0) {
  2068. /* messed up order, maybe after seeking? skipping current B-frame */
  2069. return FRAME_SKIPPED;
  2070. }
  2071. ff_mpeg4_init_direct_mv(s);
  2072. if (ctx->t_frame == 0)
  2073. ctx->t_frame = s->pb_time;
  2074. if (ctx->t_frame == 0)
  2075. ctx->t_frame = 1; // 1/0 protection
  2076. s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) -
  2077. ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
  2078. s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) -
  2079. ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2;
  2080. if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) {
  2081. s->pb_field_time = 2;
  2082. s->pp_field_time = 4;
  2083. if (!s->progressive_sequence)
  2084. return FRAME_SKIPPED;
  2085. }
  2086. }
  2087. if (s->avctx->framerate.den)
  2088. pts = ROUNDED_DIV(s->time, s->avctx->framerate.den);
  2089. else
  2090. pts = AV_NOPTS_VALUE;
  2091. ff_dlog(s->avctx, "MPEG4 PTS: %"PRId64"\n", pts);
  2092. check_marker(s->avctx, gb, "before vop_coded");
  2093. /* vop coded */
  2094. if (get_bits1(gb) != 1) {
  2095. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  2096. av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n");
  2097. return FRAME_SKIPPED;
  2098. }
  2099. if (ctx->new_pred)
  2100. decode_new_pred(ctx, gb);
  2101. if (ctx->shape != BIN_ONLY_SHAPE &&
  2102. (s->pict_type == AV_PICTURE_TYPE_P ||
  2103. (s->pict_type == AV_PICTURE_TYPE_S &&
  2104. ctx->vol_sprite_usage == GMC_SPRITE))) {
  2105. /* rounding type for motion estimation */
  2106. s->no_rounding = get_bits1(gb);
  2107. } else {
  2108. s->no_rounding = 0;
  2109. }
  2110. // FIXME reduced res stuff
  2111. if (ctx->shape != RECT_SHAPE) {
  2112. if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
  2113. skip_bits(gb, 13); /* width */
  2114. check_marker(s->avctx, gb, "after width");
  2115. skip_bits(gb, 13); /* height */
  2116. check_marker(s->avctx, gb, "after height");
  2117. skip_bits(gb, 13); /* hor_spat_ref */
  2118. check_marker(s->avctx, gb, "after hor_spat_ref");
  2119. skip_bits(gb, 13); /* ver_spat_ref */
  2120. }
  2121. skip_bits1(gb); /* change_CR_disable */
  2122. if (get_bits1(gb) != 0)
  2123. skip_bits(gb, 8); /* constant_alpha_value */
  2124. }
  2125. // FIXME complexity estimation stuff
  2126. if (ctx->shape != BIN_ONLY_SHAPE) {
  2127. skip_bits_long(gb, ctx->cplx_estimation_trash_i);
  2128. if (s->pict_type != AV_PICTURE_TYPE_I)
  2129. skip_bits_long(gb, ctx->cplx_estimation_trash_p);
  2130. if (s->pict_type == AV_PICTURE_TYPE_B)
  2131. skip_bits_long(gb, ctx->cplx_estimation_trash_b);
  2132. if (get_bits_left(gb) < 3) {
  2133. av_log(s->avctx, AV_LOG_ERROR, "Header truncated\n");
  2134. return AVERROR_INVALIDDATA;
  2135. }
  2136. ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)];
  2137. if (!s->progressive_sequence) {
  2138. s->top_field_first = get_bits1(gb);
  2139. s->alternate_scan = get_bits1(gb);
  2140. } else
  2141. s->alternate_scan = 0;
  2142. }
  2143. if (s->alternate_scan) {
  2144. ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
  2145. ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
  2146. ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan);
  2147. ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  2148. } else {
  2149. ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
  2150. ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
  2151. ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
  2152. ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  2153. }
  2154. if (s->pict_type == AV_PICTURE_TYPE_S) {
2155. if (ctx->vol_sprite_usage == STATIC_SPRITE ||
2156. ctx->vol_sprite_usage == GMC_SPRITE) {
  2157. if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0)
  2158. return AVERROR_INVALIDDATA;
  2159. if (ctx->sprite_brightness_change)
  2160. av_log(s->avctx, AV_LOG_ERROR,
  2161. "sprite_brightness_change not supported\n");
  2162. if (ctx->vol_sprite_usage == STATIC_SPRITE)
  2163. av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
  2164. } else {
  2165. memset(s->sprite_offset, 0, sizeof(s->sprite_offset));
  2166. memset(s->sprite_delta, 0, sizeof(s->sprite_delta));
  2167. }
  2168. }
  2169. if (ctx->shape != BIN_ONLY_SHAPE) {
  2170. s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision);
  2171. if (s->qscale == 0) {
  2172. av_log(s->avctx, AV_LOG_ERROR,
  2173. "Error, header damaged or not MPEG-4 header (qscale=0)\n");
2174. return AVERROR_INVALIDDATA; // makes no sense to continue, as nothing would be left of the image
  2175. }
  2176. if (s->pict_type != AV_PICTURE_TYPE_I) {
  2177. s->f_code = get_bits(gb, 3); /* fcode_for */
  2178. if (s->f_code == 0) {
  2179. av_log(s->avctx, AV_LOG_ERROR,
  2180. "Error, header damaged or not MPEG-4 header (f_code=0)\n");
  2181. s->f_code = 1;
2182. return AVERROR_INVALIDDATA; // makes no sense to continue, as nothing would be left of the image
  2183. }
  2184. } else
  2185. s->f_code = 1;
  2186. if (s->pict_type == AV_PICTURE_TYPE_B) {
  2187. s->b_code = get_bits(gb, 3);
  2188. if (s->b_code == 0) {
  2189. av_log(s->avctx, AV_LOG_ERROR,
  2190. "Error, header damaged or not MPEG4 header (b_code=0)\n");
  2191. s->b_code=1;
  2192. return AVERROR_INVALIDDATA; // makes no sense to continue, as the MV decoding will break very quickly
  2193. }
  2194. } else
  2195. s->b_code = 1;
  2196. if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
  2197. av_log(s->avctx, AV_LOG_DEBUG,
  2198. "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%"PRId64" tincr:%d\n",
  2199. s->qscale, s->f_code, s->b_code,
  2200. s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
  2201. gb->size_in_bits,s->progressive_sequence, s->alternate_scan,
  2202. s->top_field_first, s->quarter_sample ? "q" : "h",
  2203. s->data_partitioning, ctx->resync_marker,
  2204. ctx->num_sprite_warping_points, s->sprite_warping_accuracy,
  2205. 1 - s->no_rounding, s->vo_type,
  2206. ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold,
  2207. ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p,
  2208. ctx->cplx_estimation_trash_b,
  2209. s->time,
  2210. time_increment
  2211. );
  2212. }
  2213. if (!ctx->scalability) {
  2214. if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I)
  2215. skip_bits1(gb); // vop shape coding type
  2216. } else {
  2217. if (ctx->enhancement_type) {
  2218. int load_backward_shape = get_bits1(gb);
  2219. if (load_backward_shape)
  2220. av_log(s->avctx, AV_LOG_ERROR,
  2221. "load backward shape isn't supported\n");
  2222. }
  2223. skip_bits(gb, 2); // ref_select_code
  2224. }
  2225. }
  2226. /* detect buggy encoders which don't set the low_delay flag
2227. * (divx4/xvid/opendivx). Note that we cannot easily detect divx5 without
2228. * B-frames (although it is buggy too). */
  2229. if (s->vo_type == 0 && ctx->vol_control_parameters == 0 &&
  2230. ctx->divx_version == -1 && s->picture_number == 0) {
  2231. av_log(s->avctx, AV_LOG_WARNING,
  2232. "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n");
  2233. s->low_delay = 1;
  2234. }
  2235. s->picture_number++; // better than pic number==0 always ;)
  2236. // FIXME add short header support
  2237. s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
  2238. s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
  2239. if (s->workaround_bugs & FF_BUG_EDGE) {
  2240. s->h_edge_pos = s->width;
  2241. s->v_edge_pos = s->height;
  2242. }
  2243. return 0;
  2244. }
  2245. /**
  2246. * Decode MPEG-4 headers.
2247. * @return <0 if no VOP was found (or it was damaged),
2248. * FRAME_SKIPPED if a non-coded VOP was found,
2249. * 0 if a VOP was found
  2250. */
  2251. int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb)
  2252. {
  2253. MpegEncContext *s = &ctx->m;
  2254. unsigned startcode, v;
  2255. int ret;
  2256. int vol = 0;
  2257. /* search next start code */
  2258. align_get_bits(gb);
  2259. if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) {
  2260. skip_bits(gb, 24);
  2261. if (get_bits(gb, 8) == 0xF0)
  2262. goto end;
  2263. }
  2264. startcode = 0xff;
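/* Scan the byte-aligned data for the next startcode: keep a sliding 32-bit
* window and match the 0x000001xx pattern, then dispatch on the startcode
* value (VOL, user data, GOV, VOS, VOP). Only a VOP startcode ends the loop. */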
  2265. for (;;) {
  2266. if (get_bits_count(gb) >= gb->size_in_bits) {
  2267. if (gb->size_in_bits == 8 &&
  2268. (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) {
  2269. av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n", gb->size_in_bits);
  2270. return FRAME_SKIPPED; // divx bug
  2271. } else
  2272. return -1; // end of stream
  2273. }
  2274. /* use the bits after the test */
  2275. v = get_bits(gb, 8);
  2276. startcode = ((startcode << 8) | v) & 0xffffffff;
  2277. if ((startcode & 0xFFFFFF00) != 0x100)
  2278. continue; // no startcode
  2279. if (s->avctx->debug & FF_DEBUG_STARTCODE) {
  2280. av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode);
  2281. if (startcode <= 0x11F)
  2282. av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start");
  2283. else if (startcode <= 0x12F)
  2284. av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start");
  2285. else if (startcode <= 0x13F)
  2286. av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
  2287. else if (startcode <= 0x15F)
  2288. av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start");
  2289. else if (startcode <= 0x1AF)
  2290. av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
  2291. else if (startcode == 0x1B0)
  2292. av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start");
  2293. else if (startcode == 0x1B1)
  2294. av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End");
  2295. else if (startcode == 0x1B2)
  2296. av_log(s->avctx, AV_LOG_DEBUG, "User Data");
  2297. else if (startcode == 0x1B3)
  2298. av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start");
  2299. else if (startcode == 0x1B4)
  2300. av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error");
  2301. else if (startcode == 0x1B5)
  2302. av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start");
  2303. else if (startcode == 0x1B6)
  2304. av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start");
  2305. else if (startcode == 0x1B7)
  2306. av_log(s->avctx, AV_LOG_DEBUG, "slice start");
  2307. else if (startcode == 0x1B8)
  2308. av_log(s->avctx, AV_LOG_DEBUG, "extension start");
  2309. else if (startcode == 0x1B9)
  2310. av_log(s->avctx, AV_LOG_DEBUG, "fgs start");
  2311. else if (startcode == 0x1BA)
  2312. av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start");
  2313. else if (startcode == 0x1BB)
  2314. av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start");
  2315. else if (startcode == 0x1BC)
  2316. av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start");
  2317. else if (startcode == 0x1BD)
  2318. av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start");
  2319. else if (startcode == 0x1BE)
  2320. av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start");
  2321. else if (startcode == 0x1BF)
  2322. av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start");
  2323. else if (startcode == 0x1C0)
  2324. av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start");
  2325. else if (startcode == 0x1C1)
  2326. av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start");
  2327. else if (startcode == 0x1C2)
  2328. av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start");
  2329. else if (startcode == 0x1C3)
  2330. av_log(s->avctx, AV_LOG_DEBUG, "stuffing start");
  2331. else if (startcode <= 0x1C5)
  2332. av_log(s->avctx, AV_LOG_DEBUG, "reserved");
  2333. else if (startcode <= 0x1FF)
  2334. av_log(s->avctx, AV_LOG_DEBUG, "System start");
  2335. av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb));
  2336. }
  2337. if (startcode >= 0x120 && startcode <= 0x12F) {
  2338. if (vol) {
2339. av_log(s->avctx, AV_LOG_ERROR, "Multiple VOL headers\n");
  2340. return AVERROR_INVALIDDATA;
  2341. }
  2342. vol++;
  2343. if ((ret = decode_vol_header(ctx, gb)) < 0)
  2344. return ret;
  2345. } else if (startcode == USER_DATA_STARTCODE) {
  2346. decode_user_data(ctx, gb);
  2347. } else if (startcode == GOP_STARTCODE) {
  2348. mpeg4_decode_gop_header(s, gb);
  2349. } else if (startcode == VOS_STARTCODE) {
  2350. mpeg4_decode_profile_level(s, gb);
  2351. } else if (startcode == VOP_STARTCODE) {
  2352. break;
  2353. }
  2354. align_get_bits(gb);
  2355. startcode = 0xff;
  2356. }
  2357. end:
  2358. if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
  2359. s->low_delay = 1;
  2360. s->avctx->has_b_frames = !s->low_delay;
  2361. return decode_vop_header(ctx, gb);
  2362. }
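/* One-time initialization of the static RL and VLC tables (intra and RVLC
* run-level tables, DC luma/chroma, sprite trajectory, B-frame MB type)
* shared by all decoder instances. */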
  2363. av_cold void ff_mpeg4videodec_static_init(void) {
  2364. static int done = 0;
  2365. if (!done) {
  2366. ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
  2367. ff_rl_init(&ff_rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]);
  2368. ff_rl_init(&ff_rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]);
  2369. INIT_VLC_RL(ff_mpeg4_rl_intra, 554);
  2370. INIT_VLC_RL(ff_rvlc_rl_inter, 1072);
  2371. INIT_VLC_RL(ff_rvlc_rl_intra, 1072);
  2372. INIT_VLC_STATIC(&dc_lum, DC_VLC_BITS, 10 /* 13 */,
  2373. &ff_mpeg4_DCtab_lum[0][1], 2, 1,
  2374. &ff_mpeg4_DCtab_lum[0][0], 2, 1, 512);
  2375. INIT_VLC_STATIC(&dc_chrom, DC_VLC_BITS, 10 /* 13 */,
  2376. &ff_mpeg4_DCtab_chrom[0][1], 2, 1,
  2377. &ff_mpeg4_DCtab_chrom[0][0], 2, 1, 512);
  2378. INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15,
  2379. &ff_sprite_trajectory_tab[0][1], 4, 2,
  2380. &ff_sprite_trajectory_tab[0][0], 4, 2, 128);
  2381. INIT_VLC_STATIC(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4,
  2382. &ff_mb_type_b_tab[0][1], 2, 1,
  2383. &ff_mb_type_b_tab[0][0], 2, 1, 16);
  2384. done = 1;
  2385. }
  2386. }
  2387. int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
  2388. {
  2389. Mpeg4DecContext *ctx = avctx->priv_data;
  2390. MpegEncContext *s = &ctx->m;
  2391. /* divx 5.01+ bitstream reorder stuff */
  2392. /* Since this clobbers the input buffer and hwaccel codecs still need the
  2393. * data during hwaccel->end_frame we should not do this any earlier */
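/* A "packed" DivX stream muxes a second VOP (the delayed B-frame) into the
* same packet as the reference VOP. If another VOP startcode with an I or B
* coding type is found after the current position, the rest of the packet is
* stashed in s->bitstream_buffer so the next decode call consumes it. */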
  2394. if (s->divx_packed) {
  2395. int current_pos = s->gb.buffer == s->bitstream_buffer ? 0 : (get_bits_count(&s->gb) >> 3);
  2396. int startcode_found = 0;
  2397. if (buf_size - current_pos > 7) {
  2398. int i;
  2399. for (i = current_pos; i < buf_size - 4; i++)
  2400. if (buf[i] == 0 &&
  2401. buf[i + 1] == 0 &&
  2402. buf[i + 2] == 1 &&
  2403. buf[i + 3] == 0xB6) {
  2404. startcode_found = !(buf[i + 4] & 0x40);
  2405. break;
  2406. }
  2407. }
  2408. if (startcode_found) {
  2409. if (!ctx->showed_packed_warning) {
  2410. av_log(s->avctx, AV_LOG_INFO, "Video uses a non-standard and "
  2411. "wasteful way to store B-frames ('packed B-frames'). "
  2412. "Consider using the mpeg4_unpack_bframes bitstream filter without encoding but stream copy to fix it.\n");
  2413. ctx->showed_packed_warning = 1;
  2414. }
  2415. av_fast_padded_malloc(&s->bitstream_buffer,
  2416. &s->allocated_bitstream_buffer_size,
  2417. buf_size - current_pos);
  2418. if (!s->bitstream_buffer) {
  2419. s->bitstream_buffer_size = 0;
  2420. return AVERROR(ENOMEM);
  2421. }
  2422. memcpy(s->bitstream_buffer, buf + current_pos,
  2423. buf_size - current_pos);
  2424. s->bitstream_buffer_size = buf_size - current_pos;
  2425. }
  2426. }
  2427. return 0;
  2428. }
  2429. #if HAVE_THREADS
  2430. static int mpeg4_update_thread_context(AVCodecContext *dst,
  2431. const AVCodecContext *src)
  2432. {
  2433. Mpeg4DecContext *s = dst->priv_data;
  2434. const Mpeg4DecContext *s1 = src->priv_data;
  2435. int init = s->m.context_initialized;
  2436. int ret = ff_mpeg_update_thread_context(dst, src);
  2437. if (ret < 0)
  2438. return ret;
  2439. memcpy(((uint8_t*)s) + sizeof(MpegEncContext), ((uint8_t*)s1) + sizeof(MpegEncContext), sizeof(Mpeg4DecContext) - sizeof(MpegEncContext));
  2440. if (CONFIG_MPEG4_DECODER && !init && s1->xvid_build >= 0)
  2441. ff_xvid_idct_init(&s->m.idsp, dst);
  2442. return 0;
  2443. }
  2444. #endif
  2445. static av_cold int decode_init(AVCodecContext *avctx)
  2446. {
  2447. Mpeg4DecContext *ctx = avctx->priv_data;
  2448. MpegEncContext *s = &ctx->m;
  2449. int ret;
  2450. ctx->divx_version =
  2451. ctx->divx_build =
  2452. ctx->xvid_build =
  2453. ctx->lavc_build = -1;
  2454. if ((ret = ff_h263_decode_init(avctx)) < 0)
  2455. return ret;
  2456. ff_mpeg4videodec_static_init();
  2457. s->h263_pred = 1;
  2458. s->low_delay = 0; /* default, might be overridden in the vol header during header parsing */
  2459. s->decode_mb = mpeg4_decode_mb;
  2460. ctx->time_increment_bits = 4; /* default value for broken headers */
  2461. avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
  2462. avctx->internal->allocate_progress = 1;
  2463. return 0;
  2464. }
  2465. static const AVOption mpeg4_options[] = {
  2466. {"quarter_sample", "1/4 subpel MC", offsetof(MpegEncContext, quarter_sample), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0},
  2467. {"divx_packed", "divx style packed b frames", offsetof(MpegEncContext, divx_packed), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0},
  2468. {NULL}
  2469. };
  2470. static const AVClass mpeg4_class = {
  2471. "MPEG4 Video Decoder",
  2472. av_default_item_name,
  2473. mpeg4_options,
  2474. LIBAVUTIL_VERSION_INT,
  2475. };
  2476. AVCodec ff_mpeg4_decoder = {
  2477. .name = "mpeg4",
  2478. .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
  2479. .type = AVMEDIA_TYPE_VIDEO,
  2480. .id = AV_CODEC_ID_MPEG4,
  2481. .priv_data_size = sizeof(Mpeg4DecContext),
  2482. .init = decode_init,
  2483. .close = ff_h263_decode_end,
  2484. .decode = ff_h263_decode_frame,
  2485. .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
  2486. AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
  2487. AV_CODEC_CAP_FRAME_THREADS,
  2488. .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
  2489. .flush = ff_mpeg_flush,
  2490. .max_lowres = 3,
  2491. .pix_fmts = ff_h263_hwaccel_pixfmt_list_420,
  2492. .profiles = NULL_IF_CONFIG_SMALL(ff_mpeg4_video_profiles),
  2493. .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context),
  2494. .priv_class = &mpeg4_class,
  2495. };
  2496. #if CONFIG_MPEG4_VDPAU_DECODER && FF_API_VDPAU
  2497. static const AVClass mpeg4_vdpau_class = {
  2498. "MPEG4 Video VDPAU Decoder",
  2499. av_default_item_name,
  2500. mpeg4_options,
  2501. LIBAVUTIL_VERSION_INT,
  2502. };
  2503. AVCodec ff_mpeg4_vdpau_decoder = {
  2504. .name = "mpeg4_vdpau",
  2505. .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
  2506. .type = AVMEDIA_TYPE_VIDEO,
  2507. .id = AV_CODEC_ID_MPEG4,
  2508. .priv_data_size = sizeof(Mpeg4DecContext),
  2509. .init = decode_init,
  2510. .close = ff_h263_decode_end,
  2511. .decode = ff_h263_decode_frame,
  2512. .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
  2513. AV_CODEC_CAP_HWACCEL_VDPAU,
  2514. .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_MPEG4,
  2515. AV_PIX_FMT_NONE },
  2516. .priv_class = &mpeg4_vdpau_class,
  2517. };
  2518. #endif