/*
 * MPEG-4 encoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "h263.h"
#include "mpeg4video.h"

/* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
 * differences in MPEG-4. Unified in the sense that the specification specifies
 * this encoding in several steps. */
static uint8_t  uni_DCtab_lum_len[512];
static uint8_t  uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];

/* Unified encoding tables for run length encoding of coefficients.
 * Unified in the sense that the specification specifies the encoding in
 * several steps. */
static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];

//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))

/* MPEG-4
 * inter
 * max level: 24/6
 * max run: 53/63
 *
 * intra
 * max level: 53/16
 * max run: 29/41
 */
/**
 * Return the number of bits that encoding the 8x8 block in block would need.
 * @param[in] block_last_index last index in scantable order that refers to a non zero element in block.
 */
static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
                                 int block_last_index, uint8_t scantable[64])
{
    int last = 0;
    int j;
    int rate = 0;

    for (j = 1; j <= block_last_index; j++) {
        const int index = scantable[j];
        int level = block[index];
        if (level) {
            level += 64;
            if ((level & (~127)) == 0) {
                if (j < block_last_index)
                    rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
                else
                    rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
            } else
                rate += s->ac_esc_length;

            last = j;
        }
    }

    return rate;
}

/**
 * Restore the ac coefficients in block that have been changed by decide_ac_pred().
 * This function also restores s->block_last_index.
 * @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
                                     const int dir[6], uint8_t *st[6],
                                     const int zigzag_last_index[6])
{
    int i, n;
    memcpy(s->block_last_index, zigzag_last_index, sizeof(int) * 6);

    for (n = 0; n < 6; n++) {
        int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

        st[n] = s->intra_scantable.permutated;
        if (dir[n]) {
            /* top prediction */
            for (i = 1; i < 8; i++)
                block[n][s->idsp.idct_permutation[i]] = ac_val[i + 8];
        } else {
            /* left prediction */
            for (i = 1; i < 8; i++)
                block[n][s->idsp.idct_permutation[i << 3]] = ac_val[i];
        }
    }
}
/**
 * Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
 * This function will also update s->block_last_index and s->ac_val.
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
                                 const int dir[6], uint8_t *st[6],
                                 int zigzag_last_index[6])
{
    int score = 0;
    int i, n;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);

    for (n = 0; n < 6; n++) {
        int16_t *ac_val, *ac_val1;

        score -= get_block_rate(s, block[n], s->block_last_index[n],
                                s->intra_scantable.permutated);

        ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val1 = ac_val;
        if (dir[n]) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
            /* top prediction */
            ac_val -= s->block_wrap[n] * 16;
            if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i]];
                    block[n][s->idsp.idct_permutation[i]] = level - ac_val[i + 8];
                    ac_val1[i]     = block[n][s->idsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i]];
                    block[n][s->idsp.idct_permutation[i]] = level - ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = block[n][s->idsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            }
            st[n] = s->intra_h_scantable.permutated;
        } else {
            const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
            /* left prediction */
            ac_val -= 16;
            if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i << 3]];
                    block[n][s->idsp.idct_permutation[i << 3]] = level - ac_val[i];
                    ac_val1[i]     = level;
                    ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i << 3]];
                    block[n][s->idsp.idct_permutation[i << 3]] = level - ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = level;
                    ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
                }
            }
            st[n] = s->intra_v_scantable.permutated;
        }

        for (i = 63; i > 0; i--)  // FIXME optimize
            if (block[n][st[n][i]])
                break;
        s->block_last_index[n] = i;

        score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
    }

    if (score < 0) {
        return 1;
    } else {
        restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
        return 0;
    }
}
/**
 * modify mb_type & qscale so that encoding is actually possible in MPEG-4
 */
void ff_clean_mpeg4_qscales(MpegEncContext *s)
{
    int i;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    ff_clean_h263_qscales(s);

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        int odd = 0;
        /* ok, come on, this isn't funny anymore, there's more code for
         * handling this MPEG-4 mess than for the actual adaptive quantization */

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            odd += qscale_table[mb_xy] & 1;
        }

        if (2 * odd > s->mb_num)
            odd = 1;
        else
            odd = 0;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if ((qscale_table[mb_xy] & 1) != odd)
                qscale_table[mb_xy]++;
            if (qscale_table[mb_xy] > 31)
                qscale_table[mb_xy] = 31;
        }

        for (i = 1; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i - 1]] &&
                (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
                s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
            }
        }
    }
}
/**
 * Encode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
{
#if 1
    /* DC will overflow if level is outside the [-255,255] range. */
    level += 256;
    if (n < 4) {
        /* luminance */
        put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
    } else {
        /* chrominance */
        put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
    }
#else
    int size, v;
    /* find number of bits */
    size = 0;
    v    = abs(level);
    while (v) {
        v >>= 1;
        size++;
    }

    if (n < 4) {
        /* luminance */
        put_bits(&s->pb, ff_mpeg4_DCtab_lum[size][1], ff_mpeg4_DCtab_lum[size][0]);
    } else {
        /* chrominance */
        put_bits(&s->pb, ff_mpeg4_DCtab_chrom[size][1], ff_mpeg4_DCtab_chrom[size][0]);
    }

    /* encode remaining bits */
    if (size > 0) {
        if (level < 0)
            level = (-level) ^ ((1 << size) - 1);
        put_bits(&s->pb, size, level);
        if (size > 8)
            put_bits(&s->pb, 1, 1);
    }
#endif
}

static inline int mpeg4_get_dc_length(int level, int n)
{
    if (n < 4)
        return uni_DCtab_lum_len[level + 256];
    else
        return uni_DCtab_chrom_len[level + 256];
}
/**
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_block(MpegEncContext *s,
                                      int16_t *block, int n, int intra_dc,
                                      uint8_t *scan_table, PutBitContext *dc_pb,
                                      PutBitContext *ac_pb)
{
    int i, last_non_zero;
    uint32_t *bits_tab;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* MPEG-4 based DC predictor */
        mpeg4_encode_dc(dc_pb, intra_dc, n);
        if (last_index < 1)
            return;
        i = 1;
        bits_tab = uni_mpeg4_intra_rl_bits;
        len_tab  = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return;
        i = 0;
        bits_tab = uni_mpeg4_inter_rl_bits;
        len_tab  = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                put_bits(ac_pb, len_tab[index], bits_tab[index]);
            } else {  // ESC3
                put_bits(ac_pb,
                         7 + 2 + 1 + 6 + 1 + 12 + 1,
                         (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
                         (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
            }
            last_non_zero = i;
        }
    }
    /* if (i <= last_index) */ {
        int level = block[scan_table[i]];
        int run   = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            put_bits(ac_pb, len_tab[index], bits_tab[index]);
        } else {  // ESC3
            put_bits(ac_pb,
                     7 + 2 + 1 + 6 + 1 + 12 + 1,
                     (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
                     (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
        }
    }
}

static int mpeg4_get_block_length(MpegEncContext *s,
                                  int16_t *block, int n,
                                  int intra_dc, uint8_t *scan_table)
{
    int i, last_non_zero;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];
    int len = 0;

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* MPEG-4 based DC predictor */
        len += mpeg4_get_dc_length(intra_dc, n);
        if (last_index < 1)
            return len;
        i = 1;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return 0;
        i = 0;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                len += len_tab[index];
            } else {  // ESC3
                len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
            }
            last_non_zero = i;
        }
    }
    /* if (i <= last_index) */ {
        int level = block[scan_table[i]];
        int run   = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            len += len_tab[index];
        } else {  // ESC3
            len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
        }
    }

    return len;
}
static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
                                       int intra_dc[6], uint8_t **scan_table,
                                       PutBitContext *dc_pb,
                                       PutBitContext *ac_pb)
{
    int i;

    if (scan_table) {
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i,
                                                     intra_dc[i], scan_table[i]));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i,
                                   intra_dc[i], scan_table[i], dc_pb, ac_pb);
        }
    } else {
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i, 0,
                                                     s->intra_scantable.permutated));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i, 0,
                                   s->intra_scantable.permutated, dc_pb, ac_pb);
        }
    }
}

static inline int get_b_cbp(MpegEncContext *s, int16_t block[6][64],
                            int motion_x, int motion_y, int mb_type)
{
    int cbp = 0, i;

    if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        int score = 0;
        const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

        for (i = 0; i < 6; i++) {
            if (s->coded_score[i] < 0) {
                score += s->coded_score[i];
                cbp   |= 1 << (5 - i);
            }
        }

        if (cbp) {
            int zero_score = -6;
            if ((motion_x | motion_y | s->dquant | mb_type) == 0)
                zero_score -= 4;  // 2 * MV + mb_type + cbp bit

            zero_score *= lambda;
            if (zero_score <= score)
                cbp = 0;
        }

        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
                s->block_last_index[i] = -1;
                s->bdsp.clear_block(s->block[i]);
            }
        }
    } else {
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }
    }
    return cbp;
}
// FIXME this is duplicated to h263.c
static const int dquant_code[5] = { 1, 0, 9, 2, 3 };

void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
                        int motion_x, int motion_y)
{
    int cbpc, cbpy, pred_x, pred_y;
    PutBitContext *const pb2    = s->data_partitioning ? &s->pb2 : &s->pb;
    PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
    PutBitContext *const dc_pb  = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
    const int interleaved_stats = (s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;

    if (!s->mb_intra) {
        int i, cbp;

        if (s->pict_type == AV_PICTURE_TYPE_B) {
            /* convert from mv_dir to type */
            static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
            int mb_type = mb_type_table[s->mv_dir];

            if (s->mb_x == 0) {
                for (i = 0; i < 2; i++)
                    s->last_mv[i][0][0] =
                    s->last_mv[i][0][1] =
                    s->last_mv[i][1][0] =
                    s->last_mv[i][1][1] = 0;
            }

            assert(s->dquant >= -2 && s->dquant <= 2);
            assert((s->dquant & 1) == 0);
            assert(mb_type >= 0);

            /* nothing to do if this MB was skipped in the next P-frame */
            if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) {  // FIXME avoid DCT & ...
                s->skip_count++;
                s->mv[0][0][0] =
                s->mv[0][0][1] =
                s->mv[1][0][0] =
                s->mv[1][0][1] = 0;
                s->mv_dir  = MV_DIR_FORWARD;  // doesn't matter
                s->qscale -= s->dquant;
//              s->mb_skipped = 1;

                return;
            }

            cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);

            if ((cbp | motion_x | motion_y | mb_type) == 0) {
                /* direct MB with MV={0,0} */
                assert(s->dquant == 0);

                put_bits(&s->pb, 1, 1);  /* mb not coded modb1=1 */

                if (interleaved_stats) {
                    s->misc_bits++;
                    s->last_bits++;
                }
                s->skip_count++;
                return;
            }

            put_bits(&s->pb, 1, 0);            /* mb coded modb1=0 */
            put_bits(&s->pb, 1, cbp ? 0 : 1);  /* modb2 */  // FIXME merge
            put_bits(&s->pb, mb_type + 1, 1);  // this table is so simple that we don't need it :)
            if (cbp)
                put_bits(&s->pb, 6, cbp);

            if (cbp && mb_type) {
                if (s->dquant)
                    put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
                else
                    put_bits(&s->pb, 1, 0);
            } else
                s->qscale -= s->dquant;

            if (!s->progressive_sequence) {
                if (cbp)
                    put_bits(&s->pb, 1, s->interlaced_dct);
                if (mb_type)  // not direct mode
                    put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
            }

            if (interleaved_stats)
                s->misc_bits += get_bits_diff(s);

            if (!mb_type) {
                assert(s->mv_dir & MV_DIRECT);
                ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
                s->b_count++;
                s->f_count++;
            } else {
                assert(mb_type > 0 && mb_type < 4);
                if (s->mv_type != MV_TYPE_FIELD) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[0][0][0] - s->last_mv[0][0][0],
                                                     s->mv[0][0][1] - s->last_mv[0][0][1],
                                                     s->f_code);
                        s->last_mv[0][0][0] =
                        s->last_mv[0][1][0] = s->mv[0][0][0];
                        s->last_mv[0][0][1] =
                        s->last_mv[0][1][1] = s->mv[0][0][1];
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[1][0][0] - s->last_mv[1][0][0],
                                                     s->mv[1][0][1] - s->last_mv[1][0][1],
                                                     s->b_code);
                        s->last_mv[1][0][0] =
                        s->last_mv[1][1][0] = s->mv[1][0][0];
                        s->last_mv[1][0][1] =
                        s->last_mv[1][1][1] = s->mv[1][0][1];
                        s->b_count++;
                    }
                } else {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        put_bits(&s->pb, 1, s->field_select[0][0]);
                        put_bits(&s->pb, 1, s->field_select[0][1]);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        put_bits(&s->pb, 1, s->field_select[1][0]);
                        put_bits(&s->pb, 1, s->field_select[1][1]);
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[0][i][0] - s->last_mv[0][i][0],
                                                         s->mv[0][i][1] - s->last_mv[0][i][1] / 2,
                                                         s->f_code);
                            s->last_mv[0][i][0] = s->mv[0][i][0];
                            s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
                        }
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[1][i][0] - s->last_mv[1][i][0],
                                                         s->mv[1][i][1] - s->last_mv[1][i][1] / 2,
                                                         s->b_code);
                            s->last_mv[1][i][0] = s->mv[1][i][0];
                            s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
                        }
                        s->b_count++;
                    }
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);
        } else { /* s->pict_type == AV_PICTURE_TYPE_B */
            cbp = get_p_cbp(s, block, motion_x, motion_y);

            if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
                s->mv_type == MV_TYPE_16X16) {
                /* Check if the B-frames can skip it too, as we must skip it
                 * if we skip here why didn't they just compress
                 * the skip-mb bits instead of reusing them ?! */
                if (s->max_b_frames > 0) {
                    int i;
                    int x, y, offset;
                    uint8_t *p_pic;

                    x = s->mb_x * 16;
                    y = s->mb_y * 16;
                    if (x + 16 > s->width)
                        x = s->width - 16;
                    if (y + 16 > s->height)
                        y = s->height - 16;

                    offset = x + y * s->linesize;
                    p_pic  = s->new_picture.f->data[0] + offset;

                    s->mb_skipped = 1;
                    for (i = 0; i < s->max_b_frames; i++) {
                        uint8_t *b_pic;
                        int diff;
                        Picture *pic = s->reordered_input_picture[i + 1];

                        if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
                            break;

                        b_pic = pic->f->data[0] + offset;
                        if (!pic->shared)
                            b_pic += INPLACE_OFFSET;
                        diff = s->mecc.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
                        if (diff > s->qscale * 70) {  // FIXME check that 70 is optimal
                            s->mb_skipped = 0;
                            break;
                        }
                    }
                } else
                    s->mb_skipped = 1;

                if (s->mb_skipped == 1) {
                    /* skip macroblock */
                    put_bits(&s->pb, 1, 1);

                    if (interleaved_stats) {
                        s->misc_bits++;
                        s->last_bits++;
                    }
                    s->skip_count++;

                    return;
                }
            }

            put_bits(&s->pb, 1, 0);  /* mb coded */
            cbpc  = cbp & 3;
            cbpy  = cbp >> 2;
            cbpy ^= 0xf;
            if (s->mv_type == MV_TYPE_16X16) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                if (!s->progressive_sequence) {
                    if (cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                    put_bits(pb2, 1, 0);
                }

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x16 mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

                ff_h263_encode_motion_vector(s,
                                             motion_x - pred_x,
                                             motion_y - pred_y,
                                             s->f_code);
            } else if (s->mv_type == MV_TYPE_FIELD) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                assert(!s->progressive_sequence);
                if (cbp)
                    put_bits(pb2, 1, s->interlaced_dct);
                put_bits(pb2, 1, 1);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x8 interlaced mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                pred_y /= 2;

                put_bits(&s->pb, 1, s->field_select[0][0]);
                put_bits(&s->pb, 1, s->field_select[0][1]);

                ff_h263_encode_motion_vector(s,
                                             s->mv[0][0][0] - pred_x,
                                             s->mv[0][0][1] - pred_y,
                                             s->f_code);
                ff_h263_encode_motion_vector(s,
                                             s->mv[0][1][0] - pred_x,
                                             s->mv[0][1][1] - pred_y,
                                             s->f_code);
            } else {
                assert(s->mv_type == MV_TYPE_8X8);
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc + 16],
                         ff_h263_inter_MCBPC_code[cbpc + 16]);
                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);

                if (!s->progressive_sequence && cbp)
                    put_bits(pb2, 1, s->interlaced_dct);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                for (i = 0; i < 4; i++) {
                    /* motion vectors: 8x8 mode*/
                    ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

                    ff_h263_encode_motion_vector(s,
                                                 s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
                                                 s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
                                                 s->f_code);
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);

            s->f_count++;
        }
    } else {
        int cbp;
        int dc_diff[6];  // dc values with the dc prediction subtracted
        int dir[6];      // prediction direction
        int zigzag_last_index[6];
        uint8_t *scan_table[6];
        int i;

        for (i = 0; i < 6; i++)
            dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);

        if (s->avctx->flags & AV_CODEC_FLAG_AC_PRED) {
            s->ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
        } else {
            for (i = 0; i < 6; i++)
                scan_table[i] = s->intra_scantable.permutated;
        }

        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++)
            if (s->block_last_index[i] >= 1)
                cbp |= 1 << (5 - i);

        cbpc = cbp & 3;
        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if (s->dquant)
                cbpc += 4;
            put_bits(&s->pb,
                     ff_h263_intra_MCBPC_bits[cbpc],
                     ff_h263_intra_MCBPC_code[cbpc]);
        } else {
            if (s->dquant)
                cbpc += 8;
            put_bits(&s->pb, 1, 0);  /* mb coded */
            put_bits(&s->pb,
                     ff_h263_inter_MCBPC_bits[cbpc + 4],
                     ff_h263_inter_MCBPC_code[cbpc + 4]);
        }
        put_bits(pb2, 1, s->ac_pred);
        cbpy = cbp >> 2;
        put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
        if (s->dquant)
            put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);

        if (!s->progressive_sequence)
            put_bits(dc_pb, 1, s->interlaced_dct);

        if (interleaved_stats)
            s->misc_bits += get_bits_diff(s);

        mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);

        if (interleaved_stats)
            s->i_tex_bits += get_bits_diff(s);
        s->i_count++;

        /* restore ac coeffs & last_index stuff
         * if we messed them up with the prediction */
        if (s->ac_pred)
            restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
    }
}
/**
 * add MPEG-4 stuffing bits (01...1)
 */
void ff_mpeg4_stuffing(PutBitContext *pbc)
{
    int length;
    put_bits(pbc, 1, 0);
    length = (-put_bits_count(pbc)) & 7;
    if (length)
        put_bits(pbc, length, (1 << length) - 1);
}

/* must be called before writing the header */
void ff_set_mpeg4_time(MpegEncContext *s)
{
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        ff_mpeg4_init_direct_mv(s);
    } else {
        s->last_time_base = s->time_base;
        s->time_base      = s->time / s->avctx->time_base.den;
    }
}

static void mpeg4_encode_gop_header(MpegEncContext *s)
{
    int hours, minutes, seconds;
    int64_t time;

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, GOP_STARTCODE);

    time = s->current_picture_ptr->f->pts;
    if (s->reordered_input_picture[1])
        time = FFMIN(time, s->reordered_input_picture[1]->f->pts);
    time = time * s->avctx->time_base.num;

    seconds  = time / s->avctx->time_base.den;
    minutes  = seconds / 60;
    seconds %= 60;
    hours    = minutes / 60;
    minutes %= 60;
    hours   %= 24;

    put_bits(&s->pb, 5, hours);
    put_bits(&s->pb, 6, minutes);
    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 6, seconds);

    put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
    put_bits(&s->pb, 1, 0);  // broken link == NO

    s->last_time_base = time / s->avctx->time_base.den;

    ff_mpeg4_stuffing(&s->pb);
}
static void mpeg4_encode_visual_object_header(MpegEncContext *s)
{
    int profile_and_level_indication;
    int vo_ver_id;

    if (s->avctx->profile != FF_PROFILE_UNKNOWN) {
        profile_and_level_indication = s->avctx->profile << 4;
    } else if (s->max_b_frames || s->quarter_sample) {
        profile_and_level_indication = 0xF0;  // adv simple
    } else {
        profile_and_level_indication = 0x00;  // simple
    }

    if (s->avctx->level != FF_LEVEL_UNKNOWN)
        profile_and_level_indication |= s->avctx->level;
    else
        profile_and_level_indication |= 1;  // level 1

    if (profile_and_level_indication >> 4 == 0xF)
        vo_ver_id = 5;
    else
        vo_ver_id = 1;

    // FIXME levels

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VOS_STARTCODE);

    put_bits(&s->pb, 8, profile_and_level_indication);

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE);

    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 4, vo_ver_id);
    put_bits(&s->pb, 3, 1);  // priority

    put_bits(&s->pb, 4, 1);  // visual obj type == video obj

    put_bits(&s->pb, 1, 0);  // video signal type == no clue // FIXME

    ff_mpeg4_stuffing(&s->pb);
}

static void mpeg4_encode_vol_header(MpegEncContext *s,
                                    int vo_number,
                                    int vol_number)
{
    int vo_ver_id;

    if (!CONFIG_MPEG4_ENCODER)
        return;

    if (s->max_b_frames || s->quarter_sample) {
        vo_ver_id  = 5;
        s->vo_type = ADV_SIMPLE_VO_TYPE;
    } else {
        vo_ver_id  = 1;
        s->vo_type = SIMPLE_VO_TYPE;
    }

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x100 + vo_number);  /* video obj */
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x120 + vol_number); /* video obj layer */

    put_bits(&s->pb, 1, 0);          /* random access vol */
    put_bits(&s->pb, 8, s->vo_type); /* video obj type indication */
    if (s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);         /* is obj layer id= no */
    } else {
        put_bits(&s->pb, 1, 1);         /* is obj layer id= yes */
        put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
        put_bits(&s->pb, 3, 1);         /* is obj layer priority */
    }

    s->aspect_ratio_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

    put_bits(&s->pb, 4, s->aspect_ratio_info); /* aspect ratio info */
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
    }

    if (s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);  /* vol control parameters= no @@@ */
    } else {
        put_bits(&s->pb, 1, 1);  /* vol control parameters= yes */
        put_bits(&s->pb, 2, 1);  /* chroma format YUV 420/YV12 */
        put_bits(&s->pb, 1, s->low_delay);
        put_bits(&s->pb, 1, 0);  /* vbv parameters= no */
    }

    put_bits(&s->pb, 2, RECT_SHAPE);  /* vol shape= rectangle */
    put_bits(&s->pb, 1, 1);           /* marker bit */

    put_bits(&s->pb, 16, s->avctx->time_base.den);
    if (s->time_increment_bits < 1)
        s->time_increment_bits = 1;
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 1, 0);           /* fixed vop rate=no */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 13, s->width);   /* vol width */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 13, s->height);  /* vol height */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
    put_bits(&s->pb, 1, 1);           /* obmc disable */
    if (vo_ver_id == 1)
        put_bits(&s->pb, 1, 0);       /* sprite enable */
    else
        put_bits(&s->pb, 2, 0);       /* sprite enable */

    put_bits(&s->pb, 1, 0);             /* not 8 bit == false */
    put_bits(&s->pb, 1, s->mpeg_quant); /* quant type = (0 = H.263 style) */

    if (s->mpeg_quant) {
        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
    }

    if (vo_ver_id != 1)
        put_bits(&s->pb, 1, s->quarter_sample);
    put_bits(&s->pb, 1, 1);                    /* complexity estimation disable */
    put_bits(&s->pb, 1, s->rtp_mode ? 0 : 1);  /* resync marker disable */
    put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
    if (s->data_partitioning)
        put_bits(&s->pb, 1, 0);  /* no rvlc */

    if (vo_ver_id != 1) {
        put_bits(&s->pb, 1, 0);  /* newpred */
        put_bits(&s->pb, 1, 0);  /* reduced res vop */
    }
    put_bits(&s->pb, 1, 0);      /* scalability */

    ff_mpeg4_stuffing(&s->pb);

    /* user data */
    if (!(s->avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
        put_bits(&s->pb, 16, 0);
        put_bits(&s->pb, 16, 0x1B2);  /* user_data */
        avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
    }
}
/* write MPEG-4 VOP header */
void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
{
    int time_incr;
    int time_div, time_mod;

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        if (!(s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
            if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT)  // HACK, the reference sw is buggy
                mpeg4_encode_visual_object_header(s);
            if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0)  // HACK, the reference sw is buggy
                mpeg4_encode_vol_header(s, 0, 0);
        }
        if (!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_gop_header(s);
    }

    s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;

    put_bits(&s->pb, 16, 0);                /* vop header */
    put_bits(&s->pb, 16, VOP_STARTCODE);    /* vop header */
    put_bits(&s->pb, 2, s->pict_type - 1);  /* pict type: I = 0 , P = 1 */

    assert(s->time >= 0);
    time_div  = s->time / s->avctx->time_base.den;
    time_mod  = s->time % s->avctx->time_base.den;
    time_incr = time_div - s->last_time_base;
    assert(time_incr >= 0);
    while (time_incr--)
        put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, 1, 0);

    put_bits(&s->pb, 1, 1);                               /* marker */
    put_bits(&s->pb, s->time_increment_bits, time_mod);   /* time increment */
    put_bits(&s->pb, 1, 1);                               /* marker */
    put_bits(&s->pb, 1, 1);                               /* vop coded */
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        put_bits(&s->pb, 1, s->no_rounding);  /* rounding type */
    }
    put_bits(&s->pb, 3, 0);  /* intra dc VLC threshold */
    if (!s->progressive_sequence) {
        put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
        put_bits(&s->pb, 1, s->alternate_scan);
    }
    // FIXME sprite stuff

    put_bits(&s->pb, 5, s->qscale);

    if (s->pict_type != AV_PICTURE_TYPE_I)
        put_bits(&s->pb, 3, s->f_code);  /* fcode_for */
    if (s->pict_type == AV_PICTURE_TYPE_B)
        put_bits(&s->pb, 3, s->b_code);  /* fcode_back */
}
static av_cold void init_uni_dc_tab(void)
{
    int level, uni_code, uni_len;

    for (level = -256; level < 256; level++) {
        int size, v, l;
        /* find number of bits */
        size = 0;
        v    = abs(level);
        while (v) {
            v >>= 1;
            size++;
        }

        if (level < 0)
            l = (-level) ^ ((1 << size) - 1);
        else
            l = level;

        /* luminance */
        uni_code = ff_mpeg4_DCtab_lum[size][0];
        uni_len  = ff_mpeg4_DCtab_lum[size][1];

        if (size > 0) {
            uni_code <<= size;
            uni_code  |= l;
            uni_len   += size;
            if (size > 8) {
                uni_code <<= 1;
                uni_code  |= 1;
                uni_len++;
            }
        }
        uni_DCtab_lum_bits[level + 256] = uni_code;
        uni_DCtab_lum_len[level + 256]  = uni_len;

        /* chrominance */
        uni_code = ff_mpeg4_DCtab_chrom[size][0];
        uni_len  = ff_mpeg4_DCtab_chrom[size][1];

        if (size > 0) {
            uni_code <<= size;
            uni_code  |= l;
            uni_len   += size;
            if (size > 8) {
                uni_code <<= 1;
                uni_code  |= 1;
                uni_len++;
            }
        }
        uni_DCtab_chrom_bits[level + 256] = uni_code;
        uni_DCtab_chrom_len[level + 256]  = uni_len;
    }
}

static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
                                          uint8_t *len_tab)
{
    int slevel, run, last;

    assert(MAX_LEVEL >= 64);
    assert(MAX_RUN >= 63);

    for (slevel = -64; slevel < 64; slevel++) {
        if (slevel == 0)
            continue;
        for (run = 0; run < 64; run++) {
            for (last = 0; last <= 1; last++) {
                const int index = UNI_MPEG4_ENC_INDEX(last, run, slevel + 64);
                int level = slevel < 0 ? -slevel : slevel;
                int sign  = slevel < 0 ? 1 : 0;
                int bits, len, code;
                int level1, run1;

                len_tab[index] = 100;

                /* ESC0 */
                code = get_rl_index(rl, last, run, level);
                bits = rl->table_vlc[code][0];
                len  = rl->table_vlc[code][1];
                bits = bits * 2 + sign;
                len++;

                if (code != rl->n && len < len_tab[index]) {
                    bits_tab[index] = bits;
                    len_tab[index]  = len;
                }
                /* ESC1 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 2;
                len++;  // esc1
                level1 = level - rl->max_level[last][run];
                if (level1 > 0) {
                    code   = get_rl_index(rl, last, run, level1);
                    bits <<= rl->table_vlc[code][1];
                    len   += rl->table_vlc[code][1];
                    bits  += rl->table_vlc[code][0];
                    bits   = bits * 2 + sign;
                    len++;

                    if (code != rl->n && len < len_tab[index]) {
                        bits_tab[index] = bits;
                        len_tab[index]  = len;
                    }
                }
                /* ESC2 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 4 + 2;
                len += 2;  // esc2
                run1 = run - rl->max_run[last][level] - 1;
                if (run1 >= 0) {
                    code   = get_rl_index(rl, last, run1, level);
                    bits <<= rl->table_vlc[code][1];
                    len   += rl->table_vlc[code][1];
                    bits  += rl->table_vlc[code][0];
                    bits   = bits * 2 + sign;
                    len++;

                    if (code != rl->n && len < len_tab[index]) {
                        bits_tab[index] = bits;
                        len_tab[index]  = len;
                    }
                }
                /* ESC3 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 4 + 3;
                len += 2;  // esc3
                bits = bits * 2 + last;
                len++;
                bits = bits * 64 + run;
                len += 6;
                bits = bits * 2 + 1;
                len++;  // marker
                bits = bits * 4096 + (slevel & 0xfff);
                len += 12;
                bits = bits * 2 + 1;
                len++;  // marker

                if (len < len_tab[index]) {
                    bits_tab[index] = bits;
                    len_tab[index]  = len;
                }
            }
        }
    }
}
static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;
    static int done = 0;

    if ((ret = ff_mpv_encode_init(avctx)) < 0)
        return ret;

    if (!done) {
        done = 1;

        init_uni_dc_tab();

        ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);

        init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
        init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
    }

    s->min_qcoeff               = -2048;
    s->max_qcoeff               = 2047;
    s->intra_ac_vlc_length      = uni_mpeg4_intra_rl_len;
    s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64;
    s->inter_ac_vlc_length      = uni_mpeg4_inter_rl_len;
    s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64;
    s->luma_dc_vlc_length       = uni_DCtab_lum_len;
    s->ac_esc_length            = 7 + 2 + 1 + 6 + 1 + 12 + 1;
    s->y_dc_scale_table         = ff_mpeg4_y_dc_scale_table;
    s->c_dc_scale_table         = ff_mpeg4_c_dc_scale_table;

    if (s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        s->avctx->extradata = av_malloc(1024);
        init_put_bits(&s->pb, s->avctx->extradata, 1024);

        if (!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_visual_object_header(s);
        mpeg4_encode_vol_header(s, 0, 0);

//      ff_mpeg4_stuffing(&s->pb); ?
        flush_put_bits(&s->pb);
        s->avctx->extradata_size = (put_bits_count(&s->pb) + 7) >> 3;
    }
    return 0;
}
void ff_mpeg4_init_partitions(MpegEncContext *s)
{
    uint8_t *start = put_bits_ptr(&s->pb);
    uint8_t *end   = s->pb.buf_end;
    int size       = end - start;
    int pb_size    = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
    int tex_size   = (size - 2 * pb_size) & (~3);

    set_put_bits_buffer_size(&s->pb, pb_size);
    init_put_bits(&s->tex_pb, start + pb_size, tex_size);
    init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
}

void ff_mpeg4_merge_partitions(MpegEncContext *s)
{
    const int pb2_len    = put_bits_count(&s->pb2);
    const int tex_pb_len = put_bits_count(&s->tex_pb);
    const int bits       = put_bits_count(&s->pb);

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        put_bits(&s->pb, 19, DC_MARKER);
        s->misc_bits  += 19 + pb2_len + bits - s->last_bits;
        s->i_tex_bits += tex_pb_len;
    } else {
        put_bits(&s->pb, 17, MOTION_MARKER);
        s->misc_bits  += 17 + pb2_len;
        s->mv_bits    += bits - s->last_bits;
        s->p_tex_bits += tex_pb_len;
    }

    flush_put_bits(&s->pb2);
    flush_put_bits(&s->tex_pb);

    set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
    avpriv_copy_bits(&s->pb, s->pb2.buf, pb2_len);
    avpriv_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
    s->last_bits = put_bits_count(&s->pb);
}

void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
{
    int mb_num_bits = av_log2(s->mb_num - 1) + 1;

    put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0);
    put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y * s->mb_width);
    put_bits(&s->pb, s->quant_precision, s->qscale);
    put_bits(&s->pb, 1, 0);  /* no HEC */
}
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "data_partitioning", "Use data partitioning.",      OFFSET(data_partitioning), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "alternate_scan",    "Enable alternate scantable.", OFFSET(alternate_scan),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};

static const AVClass mpeg4enc_class = {
    .class_name = "MPEG4 encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_mpeg4_encoder = {
    .name           = "mpeg4",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                     AV_PIX_FMT_NONE },
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
    .priv_class     = &mpeg4enc_class,
};