/*
 * MPEG-4 encoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "h263.h"
#include "mpeg4video.h"
#include "profiles.h"

/* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
 * differences in MPEG-4. Unified in the sense that the specification specifies
 * this encoding in several steps. */
static uint8_t  uni_DCtab_lum_len[512];
static uint8_t  uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];

/* Unified encoding tables for run length encoding of coefficients.
 * Unified in the sense that the specification specifies the encoding in several steps. */
static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];

//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
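
/* Index layout of the unified RL tables above: they hold 2 * 64 * 128 entries,
 * so last (0/1) selects one half, run (0..63) selects a row of 128 entries and
 * the biased level (level + 64, giving 0..127) selects the entry.
 * For example, last=1, run=2, level=+5 lands at 1*8192 + 2*128 + 69 = 8517. */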

/* MPEG-4
 * inter
 * max level: 24/6
 * max run: 53/63
 *
 * intra
 * max level: 53/16
 * max run: 29/41
 */

/**
 * Return the number of bits needed to encode the 8x8 block in block.
 * @param[in] block_last_index last index in scantable order that refers to a non zero element in block.
 */
static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
                                 int block_last_index, uint8_t scantable[64])
{
    int last = 0;
    int j;
    int rate = 0;

    for (j = 1; j <= block_last_index; j++) {
        const int index = scantable[j];
        int level = block[index];
        if (level) {
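            /* Levels are biased by +64 below: values in [-64, 63] land in
             * [0, 127] and can be looked up in the VLC length tables; anything
             * outside that range needs the fixed-length escape code. */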
            level += 64;
            if ((level & (~127)) == 0) {
                if (j < block_last_index)
                    rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
                else
                    rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
            } else
                rate += s->ac_esc_length;

            last = j;
        }
    }

    return rate;
}

/**
 * Restore the ac coefficients in block that have been changed by decide_ac_pred().
 * This function also restores s->block_last_index.
 * @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
                                     const int dir[6], uint8_t *st[6],
                                     const int zigzag_last_index[6])
{
    int i, n;
    memcpy(s->block_last_index, zigzag_last_index, sizeof(int) * 6);

    for (n = 0; n < 6; n++) {
        int16_t *ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16;

        st[n] = s->intra_scantable.permutated;
        if (dir[n]) {
            /* top prediction */
            for (i = 1; i < 8; i++)
                block[n][s->idsp.idct_permutation[i]] = ac_val[i + 8];
        } else {
            /* left prediction */
            for (i = 1; i < 8; i++)
                block[n][s->idsp.idct_permutation[i << 3]] = ac_val[i];
        }
    }
}

/**
 * Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
 * This function will also update s->block_last_index and s->ac_val.
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
                                 const int dir[6], uint8_t *st[6],
                                 int zigzag_last_index[6])
{
    int score = 0;
    int i, n;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);

    for (n = 0; n < 6; n++) {
        int16_t *ac_val, *ac_val1;

        score -= get_block_rate(s, block[n], s->block_last_index[n],
                                s->intra_scantable.permutated);

        ac_val  = &s->ac_val[0][0][0] + s->block_index[n] * 16;
        ac_val1 = ac_val;
        if (dir[n]) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
            /* top prediction */
            ac_val -= s->block_wrap[n] * 16;
            if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i]];
                    block[n][s->idsp.idct_permutation[i]] = level - ac_val[i + 8];
                    ac_val1[i]     = block[n][s->idsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i]];
                    block[n][s->idsp.idct_permutation[i]] = level - ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = block[n][s->idsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            }
            st[n] = s->intra_h_scantable.permutated;
        } else {
            const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
            /* left prediction */
            ac_val -= 16;
            if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i << 3]];
                    block[n][s->idsp.idct_permutation[i << 3]] = level - ac_val[i];
                    ac_val1[i]     = level;
                    ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i << 3]];
                    block[n][s->idsp.idct_permutation[i << 3]] = level - ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = level;
                    ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
                }
            }
            st[n] = s->intra_v_scantable.permutated;
        }

        for (i = 63; i > 0; i--)  // FIXME optimize
            if (block[n][st[n][i]])
                break;
        s->block_last_index[n] = i;

        score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
    }
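
    /* A negative score means coding the predicted residuals costs fewer bits
     * than coding the unpredicted coefficients, so AC prediction wins;
     * otherwise the original coefficients are restored. */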
    if (score < 0) {
        return 1;
    } else {
        restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
        return 0;
    }
}

/**
 * modify mb_type & qscale so that encoding is actually possible in MPEG-4
 */
void ff_clean_mpeg4_qscales(MpegEncContext *s)
{
    int i;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    ff_clean_h263_qscales(s);

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        int odd = 0;
        /* ok, come on, this isn't funny anymore, there's more code for
         * handling this MPEG-4 mess than for the actual adaptive quantization */
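        /* B-VOP macroblocks can only change the quantizer through dbquant,
         * which allows steps of -2/0/+2, so every qscale in the frame must
         * share one parity; pick the majority parity and bump the rest.
         * Direct-mode MBs cannot carry dbquant at all, which is why a qscale
         * change forces them to the bidir candidate type below. */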
        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            odd += qscale_table[mb_xy] & 1;
        }

        if (2 * odd > s->mb_num)
            odd = 1;
        else
            odd = 0;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if ((qscale_table[mb_xy] & 1) != odd)
                qscale_table[mb_xy]++;
            if (qscale_table[mb_xy] > 31)
                qscale_table[mb_xy] = 31;
        }

        for (i = 1; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i - 1]] &&
                (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
                s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
            }
        }
    }
}

/**
 * Encode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
{
    /* DC will overflow if level is outside the [-255,255] range. */
    level += 256;
    if (n < 4) {
        /* luminance */
        put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
    } else {
        /* chrominance */
        put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
    }
}

static inline int mpeg4_get_dc_length(int level, int n)
{
    if (n < 4)
        return uni_DCtab_lum_len[level + 256];
    else
        return uni_DCtab_chrom_len[level + 256];
}

/**
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_block(MpegEncContext *s,
                                      int16_t *block, int n, int intra_dc,
                                      uint8_t *scan_table, PutBitContext *dc_pb,
                                      PutBitContext *ac_pb)
{
    int i, last_non_zero;
    uint32_t *bits_tab;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* MPEG-4 based DC predictor */
        mpeg4_encode_dc(dc_pb, intra_dc, n);
        if (last_index < 1)
            return;
        i = 1;
        bits_tab = uni_mpeg4_intra_rl_bits;
        len_tab  = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return;
        i = 0;
        bits_tab = uni_mpeg4_inter_rl_bits;
        len_tab  = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                put_bits(ac_pb, len_tab[index], bits_tab[index]);
            } else {  // ESC3
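                /* ESC3 is a fixed 30-bit code: the 7-bit escape prefix,
                 * '11' selecting the third escape mode, LAST (1 bit),
                 * RUN (6 bits), a marker bit, LEVEL (12 bits, two's
                 * complement) and a final marker bit. */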
                put_bits(ac_pb,
                         7 + 2 + 1 + 6 + 1 + 12 + 1,
                         (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
                         (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
            }
            last_non_zero = i;
        }
    }

    /* if (i <= last_index) */ {
        int level = block[scan_table[i]];
        int run = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            put_bits(ac_pb, len_tab[index], bits_tab[index]);
        } else {  // ESC3
            put_bits(ac_pb,
                     7 + 2 + 1 + 6 + 1 + 12 + 1,
                     (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
                     (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
        }
    }
}

static int mpeg4_get_block_length(MpegEncContext *s,
                                  int16_t *block, int n,
                                  int intra_dc, uint8_t *scan_table)
{
    int i, last_non_zero;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];
    int len = 0;

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* MPEG-4 based DC predictor */
        len += mpeg4_get_dc_length(intra_dc, n);
        if (last_index < 1)
            return len;
        i = 1;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return 0;
        i = 0;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                len += len_tab[index];
            } else {  // ESC3
                len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
            }
            last_non_zero = i;
        }
    }

    /* if (i <= last_index) */ {
        int level = block[scan_table[i]];
        int run = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            len += len_tab[index];
        } else {  // ESC3
            len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
        }
    }

    return len;
}

static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
                                       int intra_dc[6], uint8_t **scan_table,
                                       PutBitContext *dc_pb,
                                       PutBitContext *ac_pb)
{
    int i;

    if (scan_table) {
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i,
                                                     intra_dc[i], scan_table[i]));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i,
                                   intra_dc[i], scan_table[i], dc_pb, ac_pb);
        }
    } else {
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i, 0,
                                                     s->intra_scantable.permutated));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i, 0,
                                   s->intra_scantable.permutated, dc_pb, ac_pb);
        }
    }
}

static inline int get_b_cbp(MpegEncContext *s, int16_t block[6][64],
                            int motion_x, int motion_y, int mb_type)
{
    int cbp = 0, i;

    if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
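        /* Rate-distortion CBP decision: keep only the blocks whose coded_score
         * says coding them is a win (negative score), then check whether
         * dropping the residual entirely would be cheaper still; for a zero-MV
         * direct MB that also saves the MV/mb_type/cbp overhead. */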
        int score = 0;
        const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

        for (i = 0; i < 6; i++) {
            if (s->coded_score[i] < 0) {
                score += s->coded_score[i];
                cbp   |= 1 << (5 - i);
            }
        }

        if (cbp) {
            int zero_score = -6;
            if ((motion_x | motion_y | s->dquant | mb_type) == 0)
                zero_score -= 4;  // 2 * MV + mb_type + cbp bit

            zero_score *= lambda;
            if (zero_score <= score)
                cbp = 0;
        }

        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
                s->block_last_index[i] = -1;
                s->bdsp.clear_block(s->block[i]);
            }
        }
    } else {
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }
    }
    return cbp;
}

// FIXME this is duplicated to h263.c
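/* dquant_code[] is indexed by dquant + 2 and yields the 2-bit dquant code:
 * -2 -> 1 ('01'), -1 -> 0 ('00'), +1 -> 2 ('10'), +2 -> 3 ('11').
 * The middle entry (dquant == 0) is a dummy, since no code is sent then. */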
static const int dquant_code[5] = { 1, 0, 9, 2, 3 };

void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
                        int motion_x, int motion_y)
{
    int cbpc, cbpy, pred_x, pred_y;
    PutBitContext *const pb2    = s->data_partitioning ? &s->pb2 : &s->pb;
    PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
    PutBitContext *const dc_pb  = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
    const int interleaved_stats = (s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;

    if (!s->mb_intra) {
        int i, cbp;

        if (s->pict_type == AV_PICTURE_TYPE_B) {
            /* convert from mv_dir to type */
            static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
            int mb_type = mb_type_table[s->mv_dir];

            if (s->mb_x == 0) {
                for (i = 0; i < 2; i++)
                    s->last_mv[i][0][0] =
                    s->last_mv[i][0][1] =
                    s->last_mv[i][1][0] =
                    s->last_mv[i][1][1] = 0;
            }

            av_assert2(s->dquant >= -2 && s->dquant <= 2);
            av_assert2((s->dquant & 1) == 0);
            av_assert2(mb_type >= 0);

            /* nothing to do if this MB was skipped in the next P-frame */
            if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) {  // FIXME avoid DCT & ...
                s->skip_count++;
                s->mv[0][0][0] =
                s->mv[0][0][1] =
                s->mv[1][0][0] =
                s->mv[1][0][1] = 0;
                s->mv_dir  = MV_DIR_FORWARD;  // doesn't matter
                s->qscale -= s->dquant;
                // s->mb_skipped = 1;

                return;
            }

            cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);

            if ((cbp | motion_x | motion_y | mb_type) == 0) {
                /* direct MB with MV={0,0} */
                av_assert2(s->dquant == 0);

                put_bits(&s->pb, 1, 1);  /* mb not coded modb1=1 */

                if (interleaved_stats) {
                    s->misc_bits++;
                    s->last_bits++;
                }
                s->skip_count++;
                return;
            }

            put_bits(&s->pb, 1, 0);            /* mb coded modb1=0 */
            put_bits(&s->pb, 1, cbp ? 0 : 1);  /* modb2 */ // FIXME merge
            put_bits(&s->pb, mb_type + 1, 1);  // this table is so simple that we don't need it :)
            if (cbp)
                put_bits(&s->pb, 6, cbp);

            if (cbp && mb_type) {
                if (s->dquant)
                    put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
                else
                    put_bits(&s->pb, 1, 0);
            } else
                s->qscale -= s->dquant;

            if (!s->progressive_sequence) {
                if (cbp)
                    put_bits(&s->pb, 1, s->interlaced_dct);
                if (mb_type)  // not direct mode
                    put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
            }

            if (interleaved_stats)
                s->misc_bits += get_bits_diff(s);

            if (!mb_type) {
                av_assert2(s->mv_dir & MV_DIRECT);
                ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
                s->b_count++;
                s->f_count++;
            } else {
                av_assert2(mb_type > 0 && mb_type < 4);
                if (s->mv_type != MV_TYPE_FIELD) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[0][0][0] - s->last_mv[0][0][0],
                                                     s->mv[0][0][1] - s->last_mv[0][0][1],
                                                     s->f_code);
                        s->last_mv[0][0][0] =
                        s->last_mv[0][1][0] = s->mv[0][0][0];
                        s->last_mv[0][0][1] =
                        s->last_mv[0][1][1] = s->mv[0][0][1];
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[1][0][0] - s->last_mv[1][0][0],
                                                     s->mv[1][0][1] - s->last_mv[1][0][1],
                                                     s->b_code);
                        s->last_mv[1][0][0] =
                        s->last_mv[1][1][0] = s->mv[1][0][0];
                        s->last_mv[1][0][1] =
                        s->last_mv[1][1][1] = s->mv[1][0][1];
                        s->b_count++;
                    }
                } else {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        put_bits(&s->pb, 1, s->field_select[0][0]);
                        put_bits(&s->pb, 1, s->field_select[0][1]);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        put_bits(&s->pb, 1, s->field_select[1][0]);
                        put_bits(&s->pb, 1, s->field_select[1][1]);
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[0][i][0] - s->last_mv[0][i][0],
                                                         s->mv[0][i][1] - s->last_mv[0][i][1] / 2,
                                                         s->f_code);
                            s->last_mv[0][i][0] = s->mv[0][i][0];
                            s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
                        }
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[1][i][0] - s->last_mv[1][i][0],
                                                         s->mv[1][i][1] - s->last_mv[1][i][1] / 2,
                                                         s->b_code);
                            s->last_mv[1][i][0] = s->mv[1][i][0];
                            s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
                        }
                        s->b_count++;
                    }
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);
        } else { /* s->pict_type != AV_PICTURE_TYPE_B */
            cbp = get_p_cbp(s, block, motion_x, motion_y);

            if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
                s->mv_type == MV_TYPE_16X16) {
                /* Check if the B-frames can skip it too, as we must skip it
                 * if we skip here; why didn't they just compress
                 * the skip-mb bits instead of reusing them ?! */
                if (s->max_b_frames > 0) {
                    int i;
                    int x, y, offset;
                    uint8_t *p_pic;

                    x = s->mb_x * 16;
                    y = s->mb_y * 16;

                    offset = x + y * s->linesize;
                    p_pic  = s->new_picture.f->data[0] + offset;

                    s->mb_skipped = 1;
                    for (i = 0; i < s->max_b_frames; i++) {
                        uint8_t *b_pic;
                        int diff;
                        Picture *pic = s->reordered_input_picture[i + 1];

                        if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
                            break;

                        b_pic = pic->f->data[0] + offset;
                        if (!pic->shared)
                            b_pic += INPLACE_OFFSET;

                        if (x + 16 > s->width || y + 16 > s->height) {
                            int x1, y1;
                            int xe = FFMIN(16, s->width - x);
                            int ye = FFMIN(16, s->height - y);
                            diff = 0;
                            for (y1 = 0; y1 < ye; y1++) {
                                for (x1 = 0; x1 < xe; x1++) {
                                    diff += FFABS(p_pic[x1 + y1 * s->linesize] - b_pic[x1 + y1 * s->linesize]);
                                }
                            }
                            diff = diff * 256 / (xe * ye);
                        } else {
                            diff = s->mecc.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
                        }
                        if (diff > s->qscale * 70) {  // FIXME check that 70 is optimal
                            s->mb_skipped = 0;
                            break;
                        }
                    }
                } else
                    s->mb_skipped = 1;

                if (s->mb_skipped == 1) {
                    /* skip macroblock */
                    put_bits(&s->pb, 1, 1);

                    if (interleaved_stats) {
                        s->misc_bits++;
                        s->last_bits++;
                    }
                    s->skip_count++;

                    return;
                }
            }

            put_bits(&s->pb, 1, 0);  /* mb coded */
            cbpc  = cbp & 3;
            cbpy  = cbp >> 2;
            cbpy ^= 0xf;
            if (s->mv_type == MV_TYPE_16X16) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                if (!s->progressive_sequence) {
                    if (cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                    put_bits(pb2, 1, 0);
                }

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x16 mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

                ff_h263_encode_motion_vector(s,
                                             motion_x - pred_x,
                                             motion_y - pred_y,
                                             s->f_code);
            } else if (s->mv_type == MV_TYPE_FIELD) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                av_assert2(!s->progressive_sequence);
                if (cbp)
                    put_bits(pb2, 1, s->interlaced_dct);
                put_bits(pb2, 1, 1);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x8 interlaced mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                pred_y /= 2;

                put_bits(&s->pb, 1, s->field_select[0][0]);
                put_bits(&s->pb, 1, s->field_select[0][1]);

                ff_h263_encode_motion_vector(s,
                                             s->mv[0][0][0] - pred_x,
                                             s->mv[0][0][1] - pred_y,
                                             s->f_code);
                ff_h263_encode_motion_vector(s,
                                             s->mv[0][1][0] - pred_x,
                                             s->mv[0][1][1] - pred_y,
                                             s->f_code);
            } else {
                av_assert2(s->mv_type == MV_TYPE_8X8);
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc + 16],
                         ff_h263_inter_MCBPC_code[cbpc + 16]);
                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);

                if (!s->progressive_sequence && cbp)
                    put_bits(pb2, 1, s->interlaced_dct);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                for (i = 0; i < 4; i++) {
                    /* motion vectors: 8x8 mode */
                    ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

                    ff_h263_encode_motion_vector(s,
                                                 s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
                                                 s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
                                                 s->f_code);
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);

            s->f_count++;
        }
    } else {
        int cbp;
        int dc_diff[6];  // dc values with the dc prediction subtracted
        int dir[6];      // prediction direction
        int zigzag_last_index[6];
        uint8_t *scan_table[6];
        int i;

        for (i = 0; i < 6; i++)
            dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);

        if (s->avctx->flags & AV_CODEC_FLAG_AC_PRED) {
            s->ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
        } else {
            for (i = 0; i < 6; i++)
                scan_table[i] = s->intra_scantable.permutated;
        }

        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++)
            if (s->block_last_index[i] >= 1)
                cbp |= 1 << (5 - i);

        cbpc = cbp & 3;
        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if (s->dquant)
                cbpc += 4;
            put_bits(&s->pb,
                     ff_h263_intra_MCBPC_bits[cbpc],
                     ff_h263_intra_MCBPC_code[cbpc]);
        } else {
            if (s->dquant)
                cbpc += 8;
            put_bits(&s->pb, 1, 0);  /* mb coded */
            put_bits(&s->pb,
                     ff_h263_inter_MCBPC_bits[cbpc + 4],
                     ff_h263_inter_MCBPC_code[cbpc + 4]);
        }
        put_bits(pb2, 1, s->ac_pred);
        cbpy = cbp >> 2;
        put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
        if (s->dquant)
            put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);

        if (!s->progressive_sequence)
            put_bits(dc_pb, 1, s->interlaced_dct);

        if (interleaved_stats)
            s->misc_bits += get_bits_diff(s);

        mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);

        if (interleaved_stats)
            s->i_tex_bits += get_bits_diff(s);
        s->i_count++;

        /* restore ac coeffs & last_index stuff
         * if we messed them up with the prediction */
        if (s->ac_pred)
            restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
    }
}

/**
 * add MPEG-4 stuffing bits (01...1)
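 * The stuffing is a '0' bit followed by enough '1' bits to reach the next
 * byte boundary (1-8 bits in total); e.g. a stream that is 5 bits into a
 * byte gets '011' appended.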
 */
void ff_mpeg4_stuffing(PutBitContext *pbc)
{
    int length;
    put_bits(pbc, 1, 0);
    length = (-put_bits_count(pbc)) & 7;
    if (length)
        put_bits(pbc, length, (1 << length) - 1);
}

/* must be called before writing the header */
void ff_set_mpeg4_time(MpegEncContext *s)
{
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        ff_mpeg4_init_direct_mv(s);
    } else {
        s->last_time_base = s->time_base;
        s->time_base      = FFUDIV(s->time, s->avctx->time_base.den);
    }
}
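
/* Write the Group of VOP header: a timecode (hours:minutes:seconds, derived
 * from the earliest pts still waiting to be encoded), the closed_gov flag
 * and the broken_link flag. */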
static void mpeg4_encode_gop_header(MpegEncContext *s)
{
    int64_t hours, minutes, seconds;
    int64_t time;

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, GOP_STARTCODE);

    time = s->current_picture_ptr->f->pts;
    if (s->reordered_input_picture[1])
        time = FFMIN(time, s->reordered_input_picture[1]->f->pts);
    time = time * s->avctx->time_base.num;
    s->last_time_base = FFUDIV(time, s->avctx->time_base.den);

    seconds = FFUDIV(time, s->avctx->time_base.den);
    minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
    hours   = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
    hours   = FFUMOD(hours, 24);

    put_bits(&s->pb, 5, hours);
    put_bits(&s->pb, 6, minutes);
    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 6, seconds);

    put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
    put_bits(&s->pb, 1, 0);  // broken link == NO

    ff_mpeg4_stuffing(&s->pb);
}

static void mpeg4_encode_visual_object_header(MpegEncContext *s)
{
    int profile_and_level_indication;
    int vo_ver_id;

    if (s->avctx->profile != FF_PROFILE_UNKNOWN) {
        profile_and_level_indication = s->avctx->profile << 4;
    } else if (s->max_b_frames || s->quarter_sample) {
        profile_and_level_indication = 0xF0;  // adv simple
    } else {
        profile_and_level_indication = 0x00;  // simple
    }

    if (s->avctx->level != FF_LEVEL_UNKNOWN)
        profile_and_level_indication |= s->avctx->level;
    else
        profile_and_level_indication |= 1;  // level 1

    if (profile_and_level_indication >> 4 == 0xF)
        vo_ver_id = 5;
    else
        vo_ver_id = 1;

    // FIXME levels

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VOS_STARTCODE);

    put_bits(&s->pb, 8, profile_and_level_indication);

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE);

    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 4, vo_ver_id);
    put_bits(&s->pb, 3, 1);  // priority

    put_bits(&s->pb, 4, 1);  // visual obj type == video obj

    put_bits(&s->pb, 1, 0);  // video signal type == no clue // FIXME

    ff_mpeg4_stuffing(&s->pb);
}
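
/* Write the Video Object and Video Object Layer headers, which carry the
 * stream-level parameters: aspect ratio, VOL control/vbv flags, the VOP time
 * base, picture dimensions, interlacing, quantizer type, quarter-sample and
 * data-partitioning settings. */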
static void mpeg4_encode_vol_header(MpegEncContext *s,
                                    int vo_number,
                                    int vol_number)
{
    int vo_ver_id;

    if (s->max_b_frames || s->quarter_sample) {
        vo_ver_id  = 5;
        s->vo_type = ADV_SIMPLE_VO_TYPE;
    } else {
        vo_ver_id  = 1;
        s->vo_type = SIMPLE_VO_TYPE;
    }

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x100 + vo_number);  /* video obj */
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x120 + vol_number); /* video obj layer */

    put_bits(&s->pb, 1, 0);           /* random access vol */
    put_bits(&s->pb, 8, s->vo_type);  /* video obj type indication */
    if (s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);          /* is obj layer id= no */
    } else {
        put_bits(&s->pb, 1, 1);          /* is obj layer id= yes */
        put_bits(&s->pb, 4, vo_ver_id);  /* is obj layer ver id */
        put_bits(&s->pb, 3, 1);          /* is obj layer priority */
    }
    s->aspect_ratio_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

    put_bits(&s->pb, 4, s->aspect_ratio_info);  /* aspect ratio info */
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
        av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
                  s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den, 255);
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
    }

    if (s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);  /* vol control parameters= no @@@ */
    } else {
        put_bits(&s->pb, 1, 1);  /* vol control parameters= yes */
        put_bits(&s->pb, 2, 1);  /* chroma format YUV 420/YV12 */
        put_bits(&s->pb, 1, s->low_delay);
        put_bits(&s->pb, 1, 0);  /* vbv parameters= no */
    }

    put_bits(&s->pb, 2, RECT_SHAPE);  /* vol shape= rectangle */
    put_bits(&s->pb, 1, 1);           /* marker bit */

    put_bits(&s->pb, 16, s->avctx->time_base.den);
    if (s->time_increment_bits < 1)
        s->time_increment_bits = 1;
    put_bits(&s->pb, 1, 1);  /* marker bit */
    put_bits(&s->pb, 1, 0);  /* fixed vop rate=no */
    put_bits(&s->pb, 1, 1);  /* marker bit */
    put_bits(&s->pb, 13, s->width);   /* vol width */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 13, s->height);  /* vol height */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
    put_bits(&s->pb, 1, 1);  /* obmc disable */
    if (vo_ver_id == 1)
        put_bits(&s->pb, 1, 0);  /* sprite enable */
    else
        put_bits(&s->pb, 2, 0);  /* sprite enable */

    put_bits(&s->pb, 1, 0);              /* not 8 bit == false */
    put_bits(&s->pb, 1, s->mpeg_quant);  /* quant type = (0 = H.263 style) */

    if (s->mpeg_quant) {
        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
    }

    if (vo_ver_id != 1)
        put_bits(&s->pb, 1, s->quarter_sample);
    put_bits(&s->pb, 1, 1);  /* complexity estimation disable */
    put_bits(&s->pb, 1, s->rtp_mode ? 0 : 1);  /* resync marker disable */
    put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
    if (s->data_partitioning)
        put_bits(&s->pb, 1, 0);  /* no rvlc */

    if (vo_ver_id != 1) {
        put_bits(&s->pb, 1, 0);  /* newpred */
        put_bits(&s->pb, 1, 0);  /* reduced res vop */
    }
    put_bits(&s->pb, 1, 0);  /* scalability */
    ff_mpeg4_stuffing(&s->pb);

    /* user data */
    if (!(s->avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
        put_bits(&s->pb, 16, 0);
        put_bits(&s->pb, 16, 0x1B2);  /* user_data */
        ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
    }
}

/* write MPEG-4 VOP header */
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
{
    uint64_t time_incr;
    int64_t time_div, time_mod;

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        if (!(s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
            if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT)  // HACK, the reference sw is buggy
                mpeg4_encode_visual_object_header(s);
            if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0)  // HACK, the reference sw is buggy
                mpeg4_encode_vol_header(s, 0, 0);
        }
        if (!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_gop_header(s);
    }

    s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;

    put_bits(&s->pb, 16, 0);                /* vop header */
    put_bits(&s->pb, 16, VOP_STARTCODE);    /* vop header */
    put_bits(&s->pb, 2, s->pict_type - 1);  /* pict type: I = 0 , P = 1 */

    time_div  = FFUDIV(s->time, s->avctx->time_base.den);
    time_mod  = FFUMOD(s->time, s->avctx->time_base.den);
    time_incr = time_div - s->last_time_base;

    // This limits the frame duration to max 1 hour
    if (time_incr > 3600) {
        av_log(s->avctx, AV_LOG_ERROR, "time_incr %"PRIu64" too large\n", time_incr);
        return AVERROR(EINVAL);
    }
    while (time_incr--)
        put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 1, 0);

    put_bits(&s->pb, 1, 1);                               /* marker */
    put_bits(&s->pb, s->time_increment_bits, time_mod);   /* time increment */
    put_bits(&s->pb, 1, 1);                               /* marker */
    put_bits(&s->pb, 1, 1);                               /* vop coded */
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        put_bits(&s->pb, 1, s->no_rounding);  /* rounding type */
    }
    put_bits(&s->pb, 3, 0);  /* intra dc VLC threshold */
    if (!s->progressive_sequence) {
        put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
        put_bits(&s->pb, 1, s->alternate_scan);
    }
    // FIXME sprite stuff

    put_bits(&s->pb, 5, s->qscale);

    if (s->pict_type != AV_PICTURE_TYPE_I)
        put_bits(&s->pb, 3, s->f_code);  /* fcode_for */
    if (s->pict_type == AV_PICTURE_TYPE_B)
        put_bits(&s->pb, 3, s->b_code);  /* fcode_back */

    return 0;
}
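
/* Build the unified DC tables: for each DC difference in [-256, 255] the
 * final code is the dct_dc_size VLC for the magnitude's bit length, followed
 * by the size-bit value (one's complement of the magnitude for negative
 * levels) and, for size > 8, a trailing marker bit. For example, level = -3
 * has size = 2 and value 3 ^ 3 = 0, so the code is dct_dc_size_luminance(2)
 * followed by '00'. */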
static av_cold void init_uni_dc_tab(void)
{
    int level, uni_code, uni_len;

    for (level = -256; level < 256; level++) {
        int size, v, l;
        /* find number of bits */
        size = 0;
        v    = abs(level);
        while (v) {
            v >>= 1;
            size++;
        }

        if (level < 0)
            l = (-level) ^ ((1 << size) - 1);
        else
            l = level;

        /* luminance */
        uni_code = ff_mpeg4_DCtab_lum[size][0];
        uni_len  = ff_mpeg4_DCtab_lum[size][1];

        if (size > 0) {
            uni_code <<= size;
            uni_code  |= l;
            uni_len   += size;
            if (size > 8) {
                uni_code <<= 1;
                uni_code  |= 1;
                uni_len++;
            }
        }
        uni_DCtab_lum_bits[level + 256] = uni_code;
        uni_DCtab_lum_len[level + 256]  = uni_len;

        /* chrominance */
        uni_code = ff_mpeg4_DCtab_chrom[size][0];
        uni_len  = ff_mpeg4_DCtab_chrom[size][1];

        if (size > 0) {
            uni_code <<= size;
            uni_code  |= l;
            uni_len   += size;
            if (size > 8) {
                uni_code <<= 1;
                uni_code  |= 1;
                uni_len++;
            }
        }
        uni_DCtab_chrom_bits[level + 256] = uni_code;
        uni_DCtab_chrom_len[level + 256]  = uni_len;
    }
}
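
/* For every (last, run, level) combination, pick the cheapest of the four
 * ways MPEG-4 can code an AC coefficient: the regular VLC, ESC1 (code
 * level - max_level with the regular table), ESC2 (code run - max_run - 1
 * with the regular table) or the fixed 30-bit ESC3, and store its bits and
 * length in the unified tables. */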
static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
                                          uint8_t *len_tab)
{
    int slevel, run, last;

    av_assert0(MAX_LEVEL >= 64);
    av_assert0(MAX_RUN >= 63);

    for (slevel = -64; slevel < 64; slevel++) {
        if (slevel == 0)
            continue;
        for (run = 0; run < 64; run++) {
            for (last = 0; last <= 1; last++) {
                const int index = UNI_MPEG4_ENC_INDEX(last, run, slevel + 64);
                int level = slevel < 0 ? -slevel : slevel;
                int sign  = slevel < 0 ? 1 : 0;
                int bits, len, code;
                int level1, run1;

                len_tab[index] = 100;

                /* ESC0 */
                code = get_rl_index(rl, last, run, level);
                bits = rl->table_vlc[code][0];
                len  = rl->table_vlc[code][1];
                bits = bits * 2 + sign;
                len++;

                if (code != rl->n && len < len_tab[index]) {
                    bits_tab[index] = bits;
                    len_tab[index]  = len;
                }
                /* ESC1 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 2;
                len++;  // esc1
                level1 = level - rl->max_level[last][run];
                if (level1 > 0) {
                    code   = get_rl_index(rl, last, run, level1);
                    bits <<= rl->table_vlc[code][1];
                    len   += rl->table_vlc[code][1];
                    bits  += rl->table_vlc[code][0];
                    bits   = bits * 2 + sign;
                    len++;

                    if (code != rl->n && len < len_tab[index]) {
                        bits_tab[index] = bits;
                        len_tab[index]  = len;
                    }
                }
                /* ESC2 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 4 + 2;
                len += 2;  // esc2
                run1 = run - rl->max_run[last][level] - 1;
                if (run1 >= 0) {
                    code   = get_rl_index(rl, last, run1, level);
                    bits <<= rl->table_vlc[code][1];
                    len   += rl->table_vlc[code][1];
                    bits  += rl->table_vlc[code][0];
                    bits   = bits * 2 + sign;
                    len++;

                    if (code != rl->n && len < len_tab[index]) {
                        bits_tab[index] = bits;
                        len_tab[index]  = len;
                    }
                }
                /* ESC3 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 4 + 3;
                len += 2;  // esc3
                bits = bits * 2 + last;
                len++;
                bits = bits * 64 + run;
                len += 6;
                bits = bits * 2 + 1;
                len++;  // marker
                bits = bits * 4096 + (slevel & 0xfff);
                len += 12;
                bits = bits * 2 + 1;
                len++;  // marker

                if (len < len_tab[index]) {
                    bits_tab[index] = bits;
                    len_tab[index]  = len;
                }
            }
        }
    }
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;
    static int done = 0;

    if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) {
        av_log(avctx, AV_LOG_ERROR, "dimensions too large for MPEG-4\n");
        return AVERROR(EINVAL);
    }

    if ((ret = ff_mpv_encode_init(avctx)) < 0)
        return ret;

    if (!done) {
        done = 1;

        init_uni_dc_tab();

        ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);

        init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
        init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
    }

    s->min_qcoeff               = -2048;
    s->max_qcoeff               = 2047;
    s->intra_ac_vlc_length      = uni_mpeg4_intra_rl_len;
    s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64;
    s->inter_ac_vlc_length      = uni_mpeg4_inter_rl_len;
    s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64;
    s->luma_dc_vlc_length       = uni_DCtab_lum_len;
    s->ac_esc_length            = 7 + 2 + 1 + 6 + 1 + 12 + 1;
    s->y_dc_scale_table         = ff_mpeg4_y_dc_scale_table;
    s->c_dc_scale_table         = ff_mpeg4_c_dc_scale_table;

    if (s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        s->avctx->extradata = av_malloc(1024);
        if (!s->avctx->extradata)
            return AVERROR(ENOMEM);
        init_put_bits(&s->pb, s->avctx->extradata, 1024);

        if (!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_visual_object_header(s);
        mpeg4_encode_vol_header(s, 0, 0);

        // ff_mpeg4_stuffing(&s->pb); ?
        flush_put_bits(&s->pb);
        s->avctx->extradata_size = put_bytes_output(&s->pb);
    }
    return 0;
}
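
/* Data partitioning: split the remaining output buffer into three
 * PutBitContexts. s->pb keeps roughly the first third (aligned down to
 * 4 bytes), s->tex_pb gets the middle part for texture data and s->pb2 the
 * final third; ff_mpeg4_merge_partitions() later appends pb2 and then tex_pb
 * back onto pb to form a single bitstream. */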
void ff_mpeg4_init_partitions(MpegEncContext *s)
{
    uint8_t *start = put_bits_ptr(&s->pb);
    uint8_t *end   = s->pb.buf_end;
    int size       = end - start;
    int pb_size    = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
    int tex_size   = (size - 2 * pb_size) & (~3);

    set_put_bits_buffer_size(&s->pb, pb_size);
    init_put_bits(&s->tex_pb, start + pb_size, tex_size);
    init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
}

void ff_mpeg4_merge_partitions(MpegEncContext *s)
{
    const int pb2_len    = put_bits_count(&s->pb2);
    const int tex_pb_len = put_bits_count(&s->tex_pb);
    const int bits       = put_bits_count(&s->pb);

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        put_bits(&s->pb, 19, DC_MARKER);
        s->misc_bits  += 19 + pb2_len + bits - s->last_bits;
        s->i_tex_bits += tex_pb_len;
    } else {
        put_bits(&s->pb, 17, MOTION_MARKER);
        s->misc_bits  += 17 + pb2_len;
        s->mv_bits    += bits - s->last_bits;
        s->p_tex_bits += tex_pb_len;
    }

    flush_put_bits(&s->pb2);
    flush_put_bits(&s->tex_pb);

    set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
    ff_copy_bits(&s->pb, s->pb2.buf, pb2_len);
    ff_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
    s->last_bits = put_bits_count(&s->pb);
}
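
/* Write a video packet (resync) header: the resync marker, the macroblock
 * number in just enough bits to address every MB in the frame, the current
 * quantizer and the header-extension flag. */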
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
{
    int mb_num_bits = av_log2(s->mb_num - 1) + 1;

    put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0);
    put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y * s->mb_width);
    put_bits(&s->pb, s->quant_precision, s->qscale);
    put_bits(&s->pb, 1, 0);  /* no HEC */
}

#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "data_partitioning", "Use data partitioning.",      OFFSET(data_partitioning), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "alternate_scan",    "Enable alternate scantable.", OFFSET(alternate_scan),    AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mpeg_quant",        "Use MPEG quantizers instead of H.263",
      OFFSET(mpeg_quant), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
#if FF_API_MPEGVIDEO_OPTS
    FF_MPV_DEPRECATED_A53_CC_OPT
#endif
    FF_MPEG4_PROFILE_OPTS
    { NULL },
};

static const AVClass mpeg4enc_class = {
    .class_name = "MPEG4 encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_mpeg4_encoder = {
    .name           = "mpeg4",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &mpeg4enc_class,
};