  1. /*
  2. * MPEG-4 encoder
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of Libav.
  7. *
  8. * Libav is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * Libav is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with Libav; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "libavutil/attributes.h"
  23. #include "libavutil/log.h"
  24. #include "libavutil/opt.h"
  25. #include "mpegutils.h"
  26. #include "mpegvideo.h"
  27. #include "h263.h"
  28. #include "mpeg4video.h"
  29. /* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
  30. * differences in MPEG-4. Unified in the sense that the specification specifies
  31. * this encoding in several steps. */
  32. static uint8_t uni_DCtab_lum_len[512];
  33. static uint8_t uni_DCtab_chrom_len[512];
  34. static uint16_t uni_DCtab_lum_bits[512];
  35. static uint16_t uni_DCtab_chrom_bits[512];
  36. /* Unified encoding tables for run length encoding of coefficients.
  37. * Unified in the sense that the specification specifies the encoding in several steps. */
  38. static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
  39. static uint8_t uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
  40. static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
  41. static uint8_t uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];
  42. //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
  43. //#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
  44. #define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
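  /* Index layout of the unified RL tables: bit 13 holds the last flag, bits 12-7 the
  * run (0..63) and bits 6-0 the level, which the callers pass already biased by +64.
  * This is why the tables above hold 64 * 64 * 2 * 2 entries. */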
  45. /* MPEG-4
  46. * inter
  47. * max level: 24/6
  48. * max run: 53/63
  49. *
  50. * intra
  51. * max level: 53/16
  52. * max run: 29/41
  53. */
  54. /**
  55. * Return the number of bits needed to encode the 8x8 block in block.
  56. * @param[in] block_last_index last index in scantable order that refers to a non-zero element in block.
  57. */
  58. static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
  59. int block_last_index, uint8_t scantable[64])
  60. {
  61. int last = 0;
  62. int j;
  63. int rate = 0;
  64. for (j = 1; j <= block_last_index; j++) {
  65. const int index = scantable[j];
  66. int level = block[index];
  67. if (level) {
  68. level += 64;
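  /* After the +64 bias a codable level fits in 7 bits, i.e. the original
  * coefficient is in [-64, 63]; anything larger costs the escape code. */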
  69. if ((level & (~127)) == 0) {
  70. if (j < block_last_index)
  71. rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
  72. else
  73. rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
  74. } else
  75. rate += s->ac_esc_length;
  76. last = j;
  77. }
  78. }
  79. return rate;
  80. }
  81. /**
  82. * Restore the ac coefficients in block that have been changed by decide_ac_pred().
  83. * This function also restores s->block_last_index.
  84. * @param[in,out] block MB coefficients, these will be restored
  85. * @param[in] dir ac prediction direction for each 8x8 block
  86. * @param[out] st scantable for each 8x8 block
  88. * @param[in] zigzag_last_index index referring to the last non-zero coefficient in zigzag order
  88. */
  89. static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
  90. const int dir[6], uint8_t *st[6],
  91. const int zigzag_last_index[6])
  92. {
  93. int i, n;
  94. memcpy(s->block_last_index, zigzag_last_index, sizeof(int) * 6);
  95. for (n = 0; n < 6; n++) {
  96. int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  97. st[n] = s->intra_scantable.permutated;
  98. if (dir[n]) {
  99. /* top prediction */
  100. for (i = 1; i < 8; i++)
  101. block[n][s->idsp.idct_permutation[i]] = ac_val[i + 8];
  102. } else {
  103. /* left prediction */
  104. for (i = 1; i < 8; i++)
  105. block[n][s->idsp.idct_permutation[i << 3]] = ac_val[i];
  106. }
  107. }
  108. }
  109. /**
  110. * Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
  111. * This function will also update s->block_last_index and s->ac_val.
  112. * @param[in,out] block MB coefficients, these will be updated if 1 is returned
  113. * @param[in] dir ac prediction direction for each 8x8 block
  114. * @param[out] st scantable for each 8x8 block
  116. * @param[out] zigzag_last_index index referring to the last non-zero coefficient in zigzag order
  116. */
  117. static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
  118. const int dir[6], uint8_t *st[6],
  119. int zigzag_last_index[6])
  120. {
  121. int score = 0;
  122. int i, n;
  123. int8_t *const qscale_table = s->current_picture.qscale_table;
  124. memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);
  125. for (n = 0; n < 6; n++) {
  126. int16_t *ac_val, *ac_val1;
  127. score -= get_block_rate(s, block[n], s->block_last_index[n],
  128. s->intra_scantable.permutated);
  129. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  130. ac_val1 = ac_val;
  131. if (dir[n]) {
  132. const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
  133. /* top prediction */
  134. ac_val -= s->block_wrap[n] * 16;
  135. if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) {
  136. /* same qscale */
  137. for (i = 1; i < 8; i++) {
  138. const int level = block[n][s->idsp.idct_permutation[i]];
  139. block[n][s->idsp.idct_permutation[i]] = level - ac_val[i + 8];
  140. ac_val1[i] = block[n][s->idsp.idct_permutation[i << 3]];
  141. ac_val1[i + 8] = level;
  142. }
  143. } else {
  144. /* different qscale, we must rescale */
  145. for (i = 1; i < 8; i++) {
  146. const int level = block[n][s->idsp.idct_permutation[i]];
  147. block[n][s->idsp.idct_permutation[i]] = level - ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
  148. ac_val1[i] = block[n][s->idsp.idct_permutation[i << 3]];
  149. ac_val1[i + 8] = level;
  150. }
  151. }
  152. st[n] = s->intra_h_scantable.permutated;
  153. } else {
  154. const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
  155. /* left prediction */
  156. ac_val -= 16;
  157. if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) {
  158. /* same qscale */
  159. for (i = 1; i < 8; i++) {
  160. const int level = block[n][s->idsp.idct_permutation[i << 3]];
  161. block[n][s->idsp.idct_permutation[i << 3]] = level - ac_val[i];
  162. ac_val1[i] = level;
  163. ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
  164. }
  165. } else {
  166. /* different qscale, we must rescale */
  167. for (i = 1; i < 8; i++) {
  168. const int level = block[n][s->idsp.idct_permutation[i << 3]];
  169. block[n][s->idsp.idct_permutation[i << 3]] = level - ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
  170. ac_val1[i] = level;
  171. ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
  172. }
  173. }
  174. st[n] = s->intra_v_scantable.permutated;
  175. }
  176. for (i = 63; i > 0; i--) // FIXME optimize
  177. if (block[n][st[n][i]])
  178. break;
  179. s->block_last_index[n] = i;
  180. score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
  181. }
  182. if (score < 0) {
  183. return 1;
  184. } else {
  185. restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
  186. return 0;
  187. }
  188. }
  189. /**
  190. * modify mb_type & qscale so that encoding is actually possible in MPEG-4
  191. */
  192. void ff_clean_mpeg4_qscales(MpegEncContext *s)
  193. {
  194. int i;
  195. int8_t *const qscale_table = s->current_picture.qscale_table;
  196. ff_clean_h263_qscales(s);
  197. if (s->pict_type == AV_PICTURE_TYPE_B) {
  198. int odd = 0;
  199. /* ok, come on, this isn't funny anymore, there's more code for
  200. * handling this MPEG-4 mess than for the actual adaptive quantization */
  201. for (i = 0; i < s->mb_num; i++) {
  202. int mb_xy = s->mb_index2xy[i];
  203. odd += qscale_table[mb_xy] & 1;
  204. }
  205. if (2 * odd > s->mb_num)
  206. odd = 1;
  207. else
  208. odd = 0;
  209. for (i = 0; i < s->mb_num; i++) {
  210. int mb_xy = s->mb_index2xy[i];
  211. if ((qscale_table[mb_xy] & 1) != odd)
  212. qscale_table[mb_xy]++;
  213. if (qscale_table[mb_xy] > 31)
  214. qscale_table[mb_xy] = 31;
  215. }
  216. for (i = 1; i < s->mb_num; i++) {
  217. int mb_xy = s->mb_index2xy[i];
  218. if (qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i - 1]] &&
  219. (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
  220. s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
  221. }
  222. }
  223. }
  224. }
  225. /**
  226. * Encode the dc value.
  227. * @param n block index (0-3 are luma, 4-5 are chroma)
  228. */
  229. static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
  230. {
  231. /* DC will overflow if level is outside the [-255,255] range. */
  232. level += 256;
  233. if (n < 4) {
  234. /* luminance */
  235. put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
  236. } else {
  237. /* chrominance */
  238. put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
  239. }
  240. }
  241. static inline int mpeg4_get_dc_length(int level, int n)
  242. {
  243. if (n < 4)
  244. return uni_DCtab_lum_len[level + 256];
  245. else
  246. return uni_DCtab_chrom_len[level + 256];
  247. }
  248. /**
  249. * Encode an 8x8 block.
  250. * @param n block index (0-3 are luma, 4-5 are chroma)
  251. */
  252. static inline void mpeg4_encode_block(MpegEncContext *s,
  253. int16_t *block, int n, int intra_dc,
  254. uint8_t *scan_table, PutBitContext *dc_pb,
  255. PutBitContext *ac_pb)
  256. {
  257. int i, last_non_zero;
  258. uint32_t *bits_tab;
  259. uint8_t *len_tab;
  260. const int last_index = s->block_last_index[n];
  261. if (s->mb_intra) { // Note gcc (3.2.1 at least) will optimize this away
  262. /* MPEG-4 based DC predictor */
  263. mpeg4_encode_dc(dc_pb, intra_dc, n);
  264. if (last_index < 1)
  265. return;
  266. i = 1;
  267. bits_tab = uni_mpeg4_intra_rl_bits;
  268. len_tab = uni_mpeg4_intra_rl_len;
  269. } else {
  270. if (last_index < 0)
  271. return;
  272. i = 0;
  273. bits_tab = uni_mpeg4_inter_rl_bits;
  274. len_tab = uni_mpeg4_inter_rl_len;
  275. }
  276. /* AC coefs */
  277. last_non_zero = i - 1;
  278. for (; i < last_index; i++) {
  279. int level = block[scan_table[i]];
  280. if (level) {
  281. int run = i - last_non_zero - 1;
  282. level += 64;
  283. if ((level & (~127)) == 0) {
  284. const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
  285. put_bits(ac_pb, len_tab[index], bits_tab[index]);
  286. } else { // ESC3
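  /* ESC3 layout (30 bits, matching s->ac_esc_length): 7-bit escape VLC, "11" for
  * the third escape mode, 1-bit last flag, 6-bit run, marker bit, 12-bit signed
  * level, closing marker bit. */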
  287. put_bits(ac_pb,
  288. 7 + 2 + 1 + 6 + 1 + 12 + 1,
  289. (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
  290. (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
  291. }
  292. last_non_zero = i;
  293. }
  294. }
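  /* The coefficient at last_index is coded below with last = 1, selecting the
  * upper half of the unified table. */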
  295. /* if (i <= last_index) */ {
  296. int level = block[scan_table[i]];
  297. int run = i - last_non_zero - 1;
  298. level += 64;
  299. if ((level & (~127)) == 0) {
  300. const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
  301. put_bits(ac_pb, len_tab[index], bits_tab[index]);
  302. } else { // ESC3
  303. put_bits(ac_pb,
  304. 7 + 2 + 1 + 6 + 1 + 12 + 1,
  305. (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
  306. (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
  307. }
  308. }
  309. }
  310. static int mpeg4_get_block_length(MpegEncContext *s,
  311. int16_t *block, int n,
  312. int intra_dc, uint8_t *scan_table)
  313. {
  314. int i, last_non_zero;
  315. uint8_t *len_tab;
  316. const int last_index = s->block_last_index[n];
  317. int len = 0;
  318. if (s->mb_intra) { // Note gcc (3.2.1 at least) will optimize this away
  319. /* MPEG-4 based DC predictor */
  320. len += mpeg4_get_dc_length(intra_dc, n);
  321. if (last_index < 1)
  322. return len;
  323. i = 1;
  324. len_tab = uni_mpeg4_intra_rl_len;
  325. } else {
  326. if (last_index < 0)
  327. return 0;
  328. i = 0;
  329. len_tab = uni_mpeg4_inter_rl_len;
  330. }
  331. /* AC coefs */
  332. last_non_zero = i - 1;
  333. for (; i < last_index; i++) {
  334. int level = block[scan_table[i]];
  335. if (level) {
  336. int run = i - last_non_zero - 1;
  337. level += 64;
  338. if ((level & (~127)) == 0) {
  339. const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
  340. len += len_tab[index];
  341. } else { // ESC3
  342. len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
  343. }
  344. last_non_zero = i;
  345. }
  346. }
  347. /* if (i <= last_index) */ {
  348. int level = block[scan_table[i]];
  349. int run = i - last_non_zero - 1;
  350. level += 64;
  351. if ((level & (~127)) == 0) {
  352. const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
  353. len += len_tab[index];
  354. } else { // ESC3
  355. len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
  356. }
  357. }
  358. return len;
  359. }
  360. static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
  361. int intra_dc[6], uint8_t **scan_table,
  362. PutBitContext *dc_pb,
  363. PutBitContext *ac_pb)
  364. {
  365. int i;
  366. if (scan_table) {
  367. if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
  368. for (i = 0; i < 6; i++)
  369. skip_put_bits(&s->pb,
  370. mpeg4_get_block_length(s, block[i], i,
  371. intra_dc[i], scan_table[i]));
  372. } else {
  373. /* encode each block */
  374. for (i = 0; i < 6; i++)
  375. mpeg4_encode_block(s, block[i], i,
  376. intra_dc[i], scan_table[i], dc_pb, ac_pb);
  377. }
  378. } else {
  379. if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
  380. for (i = 0; i < 6; i++)
  381. skip_put_bits(&s->pb,
  382. mpeg4_get_block_length(s, block[i], i, 0,
  383. s->intra_scantable.permutated));
  384. } else {
  385. /* encode each block */
  386. for (i = 0; i < 6; i++)
  387. mpeg4_encode_block(s, block[i], i, 0,
  388. s->intra_scantable.permutated, dc_pb, ac_pb);
  389. }
  390. }
  391. }
  392. static inline int get_b_cbp(MpegEncContext *s, int16_t block[6][64],
  393. int motion_x, int motion_y, int mb_type)
  394. {
  395. int cbp = 0, i;
  396. if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
  397. int score = 0;
  398. const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
  399. for (i = 0; i < 6; i++) {
  400. if (s->coded_score[i] < 0) {
  401. score += s->coded_score[i];
  402. cbp |= 1 << (5 - i);
  403. }
  404. }
  405. if (cbp) {
  406. int zero_score = -6;
  407. if ((motion_x | motion_y | s->dquant | mb_type) == 0)
  408. zero_score -= 4; // 2 * MV + mb_type + cbp bit
  409. zero_score *= lambda;
  410. if (zero_score <= score)
  411. cbp = 0;
  412. }
  413. for (i = 0; i < 6; i++) {
  414. if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
  415. s->block_last_index[i] = -1;
  416. s->bdsp.clear_block(s->block[i]);
  417. }
  418. }
  419. } else {
  420. for (i = 0; i < 6; i++) {
  421. if (s->block_last_index[i] >= 0)
  422. cbp |= 1 << (5 - i);
  423. }
  424. }
  425. return cbp;
  426. }
  427. // FIXME this is duplicated to h263.c
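  /* Maps s->dquant + 2 to the 2-bit H.263-style dquant code:
  * -2 -> 01, -1 -> 00, +1 -> 10, +2 -> 11; the placeholder (9) for dquant == 0
  * is never written, since a dquant of 0 is not coded. */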
  428. static const int dquant_code[5] = { 1, 0, 9, 2, 3 };
  429. void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
  430. int motion_x, int motion_y)
  431. {
  432. int cbpc, cbpy, pred_x, pred_y;
  433. PutBitContext *const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
  434. PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
  435. PutBitContext *const dc_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
  436. const int interleaved_stats = (s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;
  437. if (!s->mb_intra) {
  438. int i, cbp;
  439. if (s->pict_type == AV_PICTURE_TYPE_B) {
  440. /* convert from mv_dir to type */
  441. static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
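  /* mv_dir is a bitmask (MV_DIR_FORWARD = 1, MV_DIR_BACKWARD = 2, MV_DIRECT = 4),
  * so forward-only maps to type 3, backward-only to 2, bidirectional to 1 and
  * direct (all three bits set) to 0; the -1 entries are combinations that cannot occur. */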
  442. int mb_type = mb_type_table[s->mv_dir];
  443. if (s->mb_x == 0) {
  444. for (i = 0; i < 2; i++)
  445. s->last_mv[i][0][0] =
  446. s->last_mv[i][0][1] =
  447. s->last_mv[i][1][0] =
  448. s->last_mv[i][1][1] = 0;
  449. }
  450. assert(s->dquant >= -2 && s->dquant <= 2);
  451. assert((s->dquant & 1) == 0);
  452. assert(mb_type >= 0);
  453. /* nothing to do if this MB was skipped in the next P-frame */
  454. if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { // FIXME avoid DCT & ...
  455. s->skip_count++;
  456. s->mv[0][0][0] =
  457. s->mv[0][0][1] =
  458. s->mv[1][0][0] =
  459. s->mv[1][0][1] = 0;
  460. s->mv_dir = MV_DIR_FORWARD; // doesn't matter
  461. s->qscale -= s->dquant;
  462. // s->mb_skipped = 1;
  463. return;
  464. }
  465. cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);
  466. if ((cbp | motion_x | motion_y | mb_type) == 0) {
  467. /* direct MB with MV={0,0} */
  468. assert(s->dquant == 0);
  469. put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */
  470. if (interleaved_stats) {
  471. s->misc_bits++;
  472. s->last_bits++;
  473. }
  474. s->skip_count++;
  475. return;
  476. }
  477. put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */
  478. put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ // FIXME merge
  479. put_bits(&s->pb, mb_type + 1, 1); // this table is so simple that we don't need it :)
  480. if (cbp)
  481. put_bits(&s->pb, 6, cbp);
  482. if (cbp && mb_type) {
  483. if (s->dquant)
  484. put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
  485. else
  486. put_bits(&s->pb, 1, 0);
  487. } else
  488. s->qscale -= s->dquant;
  489. if (!s->progressive_sequence) {
  490. if (cbp)
  491. put_bits(&s->pb, 1, s->interlaced_dct);
  492. if (mb_type) // not direct mode
  493. put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
  494. }
  495. if (interleaved_stats)
  496. s->misc_bits += get_bits_diff(s);
  497. if (!mb_type) {
  498. assert(s->mv_dir & MV_DIRECT);
  499. ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
  500. s->b_count++;
  501. s->f_count++;
  502. } else {
  503. assert(mb_type > 0 && mb_type < 4);
  504. if (s->mv_type != MV_TYPE_FIELD) {
  505. if (s->mv_dir & MV_DIR_FORWARD) {
  506. ff_h263_encode_motion_vector(s,
  507. s->mv[0][0][0] - s->last_mv[0][0][0],
  508. s->mv[0][0][1] - s->last_mv[0][0][1],
  509. s->f_code);
  510. s->last_mv[0][0][0] =
  511. s->last_mv[0][1][0] = s->mv[0][0][0];
  512. s->last_mv[0][0][1] =
  513. s->last_mv[0][1][1] = s->mv[0][0][1];
  514. s->f_count++;
  515. }
  516. if (s->mv_dir & MV_DIR_BACKWARD) {
  517. ff_h263_encode_motion_vector(s,
  518. s->mv[1][0][0] - s->last_mv[1][0][0],
  519. s->mv[1][0][1] - s->last_mv[1][0][1],
  520. s->b_code);
  521. s->last_mv[1][0][0] =
  522. s->last_mv[1][1][0] = s->mv[1][0][0];
  523. s->last_mv[1][0][1] =
  524. s->last_mv[1][1][1] = s->mv[1][0][1];
  525. s->b_count++;
  526. }
  527. } else {
  528. if (s->mv_dir & MV_DIR_FORWARD) {
  529. put_bits(&s->pb, 1, s->field_select[0][0]);
  530. put_bits(&s->pb, 1, s->field_select[0][1]);
  531. }
  532. if (s->mv_dir & MV_DIR_BACKWARD) {
  533. put_bits(&s->pb, 1, s->field_select[1][0]);
  534. put_bits(&s->pb, 1, s->field_select[1][1]);
  535. }
  536. if (s->mv_dir & MV_DIR_FORWARD) {
  537. for (i = 0; i < 2; i++) {
  538. ff_h263_encode_motion_vector(s,
  539. s->mv[0][i][0] - s->last_mv[0][i][0],
  540. s->mv[0][i][1] - s->last_mv[0][i][1] / 2,
  541. s->f_code);
  542. s->last_mv[0][i][0] = s->mv[0][i][0];
  543. s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
  544. }
  545. s->f_count++;
  546. }
  547. if (s->mv_dir & MV_DIR_BACKWARD) {
  548. for (i = 0; i < 2; i++) {
  549. ff_h263_encode_motion_vector(s,
  550. s->mv[1][i][0] - s->last_mv[1][i][0],
  551. s->mv[1][i][1] - s->last_mv[1][i][1] / 2,
  552. s->b_code);
  553. s->last_mv[1][i][0] = s->mv[1][i][0];
  554. s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
  555. }
  556. s->b_count++;
  557. }
  558. }
  559. }
  560. if (interleaved_stats)
  561. s->mv_bits += get_bits_diff(s);
  562. mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);
  563. if (interleaved_stats)
  564. s->p_tex_bits += get_bits_diff(s);
  565. } else { /* s->pict_type==AV_PICTURE_TYPE_B */
  566. cbp = get_p_cbp(s, block, motion_x, motion_y);
  567. if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
  568. s->mv_type == MV_TYPE_16X16) {
  569. /* Check whether the B-frames can skip this MB too, as we must skip it
  570. * if we skip here. Why didn't they just compress
  571. * the skip-mb bits instead of reusing them?! */
  572. if (s->max_b_frames > 0) {
  573. int i;
  574. int x, y, offset;
  575. uint8_t *p_pic;
  576. x = s->mb_x * 16;
  577. y = s->mb_y * 16;
  578. if (x + 16 > s->width)
  579. x = s->width - 16;
  580. if (y + 16 > s->height)
  581. y = s->height - 16;
  582. offset = x + y * s->linesize;
  583. p_pic = s->new_picture.f->data[0] + offset;
  584. s->mb_skipped = 1;
  585. for (i = 0; i < s->max_b_frames; i++) {
  586. uint8_t *b_pic;
  587. int diff;
  588. Picture *pic = s->reordered_input_picture[i + 1];
  589. if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
  590. break;
  591. b_pic = pic->f->data[0] + offset;
  592. if (!pic->shared)
  593. b_pic += INPLACE_OFFSET;
  594. diff = s->mecc.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
  595. if (diff > s->qscale * 70) { // FIXME check that 70 is optimal
  596. s->mb_skipped = 0;
  597. break;
  598. }
  599. }
  600. } else
  601. s->mb_skipped = 1;
  602. if (s->mb_skipped == 1) {
  603. /* skip macroblock */
  604. put_bits(&s->pb, 1, 1);
  605. if (interleaved_stats) {
  606. s->misc_bits++;
  607. s->last_bits++;
  608. }
  609. s->skip_count++;
  610. return;
  611. }
  612. }
  613. put_bits(&s->pb, 1, 0); /* mb coded */
  614. cbpc = cbp & 3;
  615. cbpy = cbp >> 2;
  616. cbpy ^= 0xf;
  617. if (s->mv_type == MV_TYPE_16X16) {
  618. if (s->dquant)
  619. cbpc += 8;
  620. put_bits(&s->pb,
  621. ff_h263_inter_MCBPC_bits[cbpc],
  622. ff_h263_inter_MCBPC_code[cbpc]);
  623. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  624. if (s->dquant)
  625. put_bits(pb2, 2, dquant_code[s->dquant + 2]);
  626. if (!s->progressive_sequence) {
  627. if (cbp)
  628. put_bits(pb2, 1, s->interlaced_dct);
  629. put_bits(pb2, 1, 0);
  630. }
  631. if (interleaved_stats)
  632. s->misc_bits += get_bits_diff(s);
  633. /* motion vectors: 16x16 mode */
  634. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  635. ff_h263_encode_motion_vector(s,
  636. motion_x - pred_x,
  637. motion_y - pred_y,
  638. s->f_code);
  639. } else if (s->mv_type == MV_TYPE_FIELD) {
  640. if (s->dquant)
  641. cbpc += 8;
  642. put_bits(&s->pb,
  643. ff_h263_inter_MCBPC_bits[cbpc],
  644. ff_h263_inter_MCBPC_code[cbpc]);
  645. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  646. if (s->dquant)
  647. put_bits(pb2, 2, dquant_code[s->dquant + 2]);
  648. assert(!s->progressive_sequence);
  649. if (cbp)
  650. put_bits(pb2, 1, s->interlaced_dct);
  651. put_bits(pb2, 1, 1);
  652. if (interleaved_stats)
  653. s->misc_bits += get_bits_diff(s);
  654. /* motion vectors: 16x8 interlaced mode */
  655. ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  656. pred_y /= 2;
  657. put_bits(&s->pb, 1, s->field_select[0][0]);
  658. put_bits(&s->pb, 1, s->field_select[0][1]);
  659. ff_h263_encode_motion_vector(s,
  660. s->mv[0][0][0] - pred_x,
  661. s->mv[0][0][1] - pred_y,
  662. s->f_code);
  663. ff_h263_encode_motion_vector(s,
  664. s->mv[0][1][0] - pred_x,
  665. s->mv[0][1][1] - pred_y,
  666. s->f_code);
  667. } else {
  668. assert(s->mv_type == MV_TYPE_8X8);
  669. put_bits(&s->pb,
  670. ff_h263_inter_MCBPC_bits[cbpc + 16],
  671. ff_h263_inter_MCBPC_code[cbpc + 16]);
  672. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  673. if (!s->progressive_sequence && cbp)
  674. put_bits(pb2, 1, s->interlaced_dct);
  675. if (interleaved_stats)
  676. s->misc_bits += get_bits_diff(s);
  677. for (i = 0; i < 4; i++) {
  678. /* motion vectors: 8x8 mode*/
  679. ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  680. ff_h263_encode_motion_vector(s,
  681. s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
  682. s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
  683. s->f_code);
  684. }
  685. }
  686. if (interleaved_stats)
  687. s->mv_bits += get_bits_diff(s);
  688. mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);
  689. if (interleaved_stats)
  690. s->p_tex_bits += get_bits_diff(s);
  691. s->f_count++;
  692. }
  693. } else {
  694. int cbp;
  695. int dc_diff[6]; // dc values with the dc prediction subtracted
  696. int dir[6]; // prediction direction
  697. int zigzag_last_index[6];
  698. uint8_t *scan_table[6];
  699. int i;
  700. for (i = 0; i < 6; i++)
  701. dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);
  702. if (s->avctx->flags & AV_CODEC_FLAG_AC_PRED) {
  703. s->ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
  704. } else {
  705. for (i = 0; i < 6; i++)
  706. scan_table[i] = s->intra_scantable.permutated;
  707. }
  708. /* compute cbp */
  709. cbp = 0;
  710. for (i = 0; i < 6; i++)
  711. if (s->block_last_index[i] >= 1)
  712. cbp |= 1 << (5 - i);
  713. cbpc = cbp & 3;
  714. if (s->pict_type == AV_PICTURE_TYPE_I) {
  715. if (s->dquant)
  716. cbpc += 4;
  717. put_bits(&s->pb,
  718. ff_h263_intra_MCBPC_bits[cbpc],
  719. ff_h263_intra_MCBPC_code[cbpc]);
  720. } else {
  721. if (s->dquant)
  722. cbpc += 8;
  723. put_bits(&s->pb, 1, 0); /* mb coded */
  724. put_bits(&s->pb,
  725. ff_h263_inter_MCBPC_bits[cbpc + 4],
  726. ff_h263_inter_MCBPC_code[cbpc + 4]);
  727. }
  728. put_bits(pb2, 1, s->ac_pred);
  729. cbpy = cbp >> 2;
  730. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  731. if (s->dquant)
  732. put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);
  733. if (!s->progressive_sequence)
  734. put_bits(dc_pb, 1, s->interlaced_dct);
  735. if (interleaved_stats)
  736. s->misc_bits += get_bits_diff(s);
  737. mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);
  738. if (interleaved_stats)
  739. s->i_tex_bits += get_bits_diff(s);
  740. s->i_count++;
  741. /* restore ac coeffs & last_index stuff
  742. * if we messed them up with the prediction */
  743. if (s->ac_pred)
  744. restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
  745. }
  746. }
  747. /**
  748. * add MPEG-4 stuffing bits (01...1)
  749. */
  750. void ff_mpeg4_stuffing(PutBitContext *pbc)
  751. {
  752. int length;
  753. put_bits(pbc, 1, 0);
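  /* A zero bit followed by ones up to the next byte boundary; the negated bit
  * count modulo 8 below is the number of one bits still needed. */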
  754. length = (-put_bits_count(pbc)) & 7;
  755. if (length)
  756. put_bits(pbc, length, (1 << length) - 1);
  757. }
  758. /* must be called before writing the header */
  759. void ff_set_mpeg4_time(MpegEncContext *s)
  760. {
  761. if (s->pict_type == AV_PICTURE_TYPE_B) {
  762. ff_mpeg4_init_direct_mv(s);
  763. } else {
  764. s->last_time_base = s->time_base;
  765. s->time_base = s->time / s->avctx->time_base.den;
  766. }
  767. }
  768. static void mpeg4_encode_gop_header(MpegEncContext *s)
  769. {
  770. int hours, minutes, seconds;
  771. int64_t time;
  772. put_bits(&s->pb, 16, 0);
  773. put_bits(&s->pb, 16, GOP_STARTCODE);
  774. time = s->current_picture_ptr->f->pts;
  775. if (s->reordered_input_picture[1])
  776. time = FFMIN(time, s->reordered_input_picture[1]->f->pts);
  777. time = time * s->avctx->time_base.num;
  778. seconds = time / s->avctx->time_base.den;
  779. minutes = seconds / 60;
  780. seconds %= 60;
  781. hours = minutes / 60;
  782. minutes %= 60;
  783. hours %= 24;
  784. put_bits(&s->pb, 5, hours);
  785. put_bits(&s->pb, 6, minutes);
  786. put_bits(&s->pb, 1, 1);
  787. put_bits(&s->pb, 6, seconds);
  788. put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
  789. put_bits(&s->pb, 1, 0); // broken link == NO
  790. s->last_time_base = time / s->avctx->time_base.den;
  791. ff_mpeg4_stuffing(&s->pb);
  792. }
  793. static void mpeg4_encode_visual_object_header(MpegEncContext *s)
  794. {
  795. int profile_and_level_indication;
  796. int vo_ver_id;
  797. if (s->avctx->profile != FF_PROFILE_UNKNOWN) {
  798. profile_and_level_indication = s->avctx->profile << 4;
  799. } else if (s->max_b_frames || s->quarter_sample) {
  800. profile_and_level_indication = 0xF0; // adv simple
  801. } else {
  802. profile_and_level_indication = 0x00; // simple
  803. }
  804. if (s->avctx->level != FF_LEVEL_UNKNOWN)
  805. profile_and_level_indication |= s->avctx->level;
  806. else
  807. profile_and_level_indication |= 1; // level 1
  808. if (profile_and_level_indication >> 4 == 0xF)
  809. vo_ver_id = 5;
  810. else
  811. vo_ver_id = 1;
  812. // FIXME levels
  813. put_bits(&s->pb, 16, 0);
  814. put_bits(&s->pb, 16, VOS_STARTCODE);
  815. put_bits(&s->pb, 8, profile_and_level_indication);
  816. put_bits(&s->pb, 16, 0);
  817. put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE);
  818. put_bits(&s->pb, 1, 1);
  819. put_bits(&s->pb, 4, vo_ver_id);
  820. put_bits(&s->pb, 3, 1); // priority
  821. put_bits(&s->pb, 4, 1); // visual obj type== video obj
  822. put_bits(&s->pb, 1, 0); // video signal type == no clue // FIXME
  823. ff_mpeg4_stuffing(&s->pb);
  824. }
  825. static void mpeg4_encode_vol_header(MpegEncContext *s,
  826. int vo_number,
  827. int vol_number)
  828. {
  829. int vo_ver_id;
  830. if (!CONFIG_MPEG4_ENCODER)
  831. return;
  832. if (s->max_b_frames || s->quarter_sample) {
  833. vo_ver_id = 5;
  834. s->vo_type = ADV_SIMPLE_VO_TYPE;
  835. } else {
  836. vo_ver_id = 1;
  837. s->vo_type = SIMPLE_VO_TYPE;
  838. }
  839. put_bits(&s->pb, 16, 0);
  840. put_bits(&s->pb, 16, 0x100 + vo_number); /* video obj */
  841. put_bits(&s->pb, 16, 0);
  842. put_bits(&s->pb, 16, 0x120 + vol_number); /* video obj layer */
  843. put_bits(&s->pb, 1, 0); /* random access vol */
  844. put_bits(&s->pb, 8, s->vo_type); /* video obj type indication */
  845. if (s->workaround_bugs & FF_BUG_MS) {
  846. put_bits(&s->pb, 1, 0); /* is obj layer id= no */
  847. } else {
  848. put_bits(&s->pb, 1, 1); /* is obj layer id= yes */
  849. put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
  850. put_bits(&s->pb, 3, 1); /* is obj layer priority */
  851. }
  852. s->aspect_ratio_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);
  853. put_bits(&s->pb, 4, s->aspect_ratio_info); /* aspect ratio info */
  854. if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
  855. put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
  856. put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
  857. }
  858. if (s->workaround_bugs & FF_BUG_MS) {
  859. put_bits(&s->pb, 1, 0); /* vol control parameters= no @@@ */
  860. } else {
  861. put_bits(&s->pb, 1, 1); /* vol control parameters= yes */
  862. put_bits(&s->pb, 2, 1); /* chroma format YUV 420/YV12 */
  863. put_bits(&s->pb, 1, s->low_delay);
  864. put_bits(&s->pb, 1, 0); /* vbv parameters= no */
  865. }
  866. put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */
  867. put_bits(&s->pb, 1, 1); /* marker bit */
  868. put_bits(&s->pb, 16, s->avctx->time_base.den);
  869. if (s->time_increment_bits < 1)
  870. s->time_increment_bits = 1;
  871. put_bits(&s->pb, 1, 1); /* marker bit */
  872. put_bits(&s->pb, 1, 0); /* fixed vop rate=no */
  873. put_bits(&s->pb, 1, 1); /* marker bit */
  874. put_bits(&s->pb, 13, s->width); /* vol width */
  875. put_bits(&s->pb, 1, 1); /* marker bit */
  876. put_bits(&s->pb, 13, s->height); /* vol height */
  877. put_bits(&s->pb, 1, 1); /* marker bit */
  878. put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
  879. put_bits(&s->pb, 1, 1); /* obmc disable */
  880. if (vo_ver_id == 1)
  881. put_bits(&s->pb, 1, 0); /* sprite enable */
  882. else
  883. put_bits(&s->pb, 2, 0); /* sprite enable */
  884. put_bits(&s->pb, 1, 0); /* not 8 bit == false */
  885. put_bits(&s->pb, 1, s->mpeg_quant); /* quant type = (0 = H.263 style) */
  886. if (s->mpeg_quant) {
  887. ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
  888. ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
  889. }
  890. if (vo_ver_id != 1)
  891. put_bits(&s->pb, 1, s->quarter_sample);
  892. put_bits(&s->pb, 1, 1); /* complexity estimation disable */
  893. put_bits(&s->pb, 1, s->rtp_mode ? 0 : 1); /* resync marker disable */
  894. put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
  895. if (s->data_partitioning)
  896. put_bits(&s->pb, 1, 0); /* no rvlc */
  897. if (vo_ver_id != 1) {
  898. put_bits(&s->pb, 1, 0); /* newpred */
  899. put_bits(&s->pb, 1, 0); /* reduced res vop */
  900. }
  901. put_bits(&s->pb, 1, 0); /* scalability */
  902. ff_mpeg4_stuffing(&s->pb);
  903. /* user data */
  904. if (!(s->avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
  905. put_bits(&s->pb, 16, 0);
  906. put_bits(&s->pb, 16, 0x1B2); /* user_data */
  907. avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
  908. }
  909. }
  910. /* write MPEG-4 VOP header */
  911. void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
  912. {
  913. int time_incr;
  914. int time_div, time_mod;
  915. if (s->pict_type == AV_PICTURE_TYPE_I) {
  916. if (!(s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
  917. if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy
  918. mpeg4_encode_visual_object_header(s);
  919. if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0) // HACK, the reference sw is buggy
  920. mpeg4_encode_vol_header(s, 0, 0);
  921. }
  922. if (!(s->workaround_bugs & FF_BUG_MS))
  923. mpeg4_encode_gop_header(s);
  924. }
  925. s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;
  926. put_bits(&s->pb, 16, 0); /* vop header */
  927. put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */
  928. put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */
  929. assert(s->time >= 0);
  930. time_div = s->time / s->avctx->time_base.den;
  931. time_mod = s->time % s->avctx->time_base.den;
  932. time_incr = time_div - s->last_time_base;
  933. assert(time_incr >= 0);
  934. while (time_incr--)
  935. put_bits(&s->pb, 1, 1);
  936. put_bits(&s->pb, 1, 0);
  937. put_bits(&s->pb, 1, 1); /* marker */
  938. put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
  939. put_bits(&s->pb, 1, 1); /* marker */
  940. put_bits(&s->pb, 1, 1); /* vop coded */
  941. if (s->pict_type == AV_PICTURE_TYPE_P) {
  942. put_bits(&s->pb, 1, s->no_rounding); /* rounding type */
  943. }
  944. put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
  945. if (!s->progressive_sequence) {
  946. put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
  947. put_bits(&s->pb, 1, s->alternate_scan);
  948. }
  949. // FIXME sprite stuff
  950. put_bits(&s->pb, 5, s->qscale);
  951. if (s->pict_type != AV_PICTURE_TYPE_I)
  952. put_bits(&s->pb, 3, s->f_code); /* fcode_for */
  953. if (s->pict_type == AV_PICTURE_TYPE_B)
  954. put_bits(&s->pb, 3, s->b_code); /* fcode_back */
  955. }
  956. static av_cold void init_uni_dc_tab(void)
  957. {
  958. int level, uni_code, uni_len;
  959. for (level = -256; level < 256; level++) {
  960. int size, v, l;
  961. /* find number of bits */
  962. size = 0;
  963. v = abs(level);
  964. while (v) {
  965. v >>= 1;
  966. size++;
  967. }
  968. if (level < 0)
  969. l = (-level) ^ ((1 << size) - 1);
  970. else
  971. l = level;
  972. /* luminance */
  973. uni_code = ff_mpeg4_DCtab_lum[size][0];
  974. uni_len = ff_mpeg4_DCtab_lum[size][1];
  975. if (size > 0) {
  976. uni_code <<= size;
  977. uni_code |= l;
  978. uni_len += size;
  979. if (size > 8) {
  980. uni_code <<= 1;
  981. uni_code |= 1;
  982. uni_len++;
  983. }
  984. }
  985. uni_DCtab_lum_bits[level + 256] = uni_code;
  986. uni_DCtab_lum_len[level + 256] = uni_len;
  987. /* chrominance */
  988. uni_code = ff_mpeg4_DCtab_chrom[size][0];
  989. uni_len = ff_mpeg4_DCtab_chrom[size][1];
  990. if (size > 0) {
  991. uni_code <<= size;
  992. uni_code |= l;
  993. uni_len += size;
  994. if (size > 8) {
  995. uni_code <<= 1;
  996. uni_code |= 1;
  997. uni_len++;
  998. }
  999. }
  1000. uni_DCtab_chrom_bits[level + 256] = uni_code;
  1001. uni_DCtab_chrom_len[level + 256] = uni_len;
  1002. }
  1003. }
  1004. static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
  1005. uint8_t *len_tab)
  1006. {
  1007. int slevel, run, last;
  1008. assert(MAX_LEVEL >= 64);
  1009. assert(MAX_RUN >= 63);
  1010. for (slevel = -64; slevel < 64; slevel++) {
  1011. if (slevel == 0)
  1012. continue;
  1013. for (run = 0; run < 64; run++) {
  1014. for (last = 0; last <= 1; last++) {
  1015. const int index = UNI_MPEG4_ENC_INDEX(last, run, slevel + 64);
  1016. int level = slevel < 0 ? -slevel : slevel;
  1017. int sign = slevel < 0 ? 1 : 0;
  1018. int bits, len, code;
  1019. int level1, run1;
  1020. len_tab[index] = 100;
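  /* Start from an impossible length so the shortest of the candidate codes
  * computed below always wins. */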
  1021. /* ESC0 */
  1022. code = get_rl_index(rl, last, run, level);
  1023. bits = rl->table_vlc[code][0];
  1024. len = rl->table_vlc[code][1];
  1025. bits = bits * 2 + sign;
  1026. len++;
  1027. if (code != rl->n && len < len_tab[index]) {
  1028. bits_tab[index] = bits;
  1029. len_tab[index] = len;
  1030. }
  1031. /* ESC1 */
  1032. bits = rl->table_vlc[rl->n][0];
  1033. len = rl->table_vlc[rl->n][1];
  1034. bits = bits * 2;
  1035. len++; // esc1
  1036. level1 = level - rl->max_level[last][run];
  1037. if (level1 > 0) {
  1038. code = get_rl_index(rl, last, run, level1);
  1039. bits <<= rl->table_vlc[code][1];
  1040. len += rl->table_vlc[code][1];
  1041. bits += rl->table_vlc[code][0];
  1042. bits = bits * 2 + sign;
  1043. len++;
  1044. if (code != rl->n && len < len_tab[index]) {
  1045. bits_tab[index] = bits;
  1046. len_tab[index] = len;
  1047. }
  1048. }
  1049. /* ESC2 */
  1050. bits = rl->table_vlc[rl->n][0];
  1051. len = rl->table_vlc[rl->n][1];
  1052. bits = bits * 4 + 2;
  1053. len += 2; // esc2
  1054. run1 = run - rl->max_run[last][level] - 1;
  1055. if (run1 >= 0) {
  1056. code = get_rl_index(rl, last, run1, level);
  1057. bits <<= rl->table_vlc[code][1];
  1058. len += rl->table_vlc[code][1];
  1059. bits += rl->table_vlc[code][0];
  1060. bits = bits * 2 + sign;
  1061. len++;
  1062. if (code != rl->n && len < len_tab[index]) {
  1063. bits_tab[index] = bits;
  1064. len_tab[index] = len;
  1065. }
  1066. }
  1067. /* ESC3 */
  1068. bits = rl->table_vlc[rl->n][0];
  1069. len = rl->table_vlc[rl->n][1];
  1070. bits = bits * 4 + 3;
  1071. len += 2; // esc3
  1072. bits = bits * 2 + last;
  1073. len++;
  1074. bits = bits * 64 + run;
  1075. len += 6;
  1076. bits = bits * 2 + 1;
  1077. len++; // marker
  1078. bits = bits * 4096 + (slevel & 0xfff);
  1079. len += 12;
  1080. bits = bits * 2 + 1;
  1081. len++; // marker
  1082. if (len < len_tab[index]) {
  1083. bits_tab[index] = bits;
  1084. len_tab[index] = len;
  1085. }
  1086. }
  1087. }
  1088. }
  1089. }
  1090. static av_cold int encode_init(AVCodecContext *avctx)
  1091. {
  1092. MpegEncContext *s = avctx->priv_data;
  1093. int ret;
  1094. static int done = 0;
  1095. if ((ret = ff_mpv_encode_init(avctx)) < 0)
  1096. return ret;
  1097. if (!done) {
  1098. done = 1;
  1099. init_uni_dc_tab();
  1100. ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
  1101. init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
  1102. init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
  1103. }
  1104. s->min_qcoeff = -2048;
  1105. s->max_qcoeff = 2047;
  1106. s->intra_ac_vlc_length = uni_mpeg4_intra_rl_len;
  1107. s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64;
  1108. s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len;
  1109. s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64;
  1110. s->luma_dc_vlc_length = uni_DCtab_lum_len;
  1111. s->ac_esc_length = 7 + 2 + 1 + 6 + 1 + 12 + 1;
  1112. s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table;
  1113. s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table;
  1114. if (s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
  1115. s->avctx->extradata = av_malloc(1024);
  if (!s->avctx->extradata)
      return AVERROR(ENOMEM);
  1116. init_put_bits(&s->pb, s->avctx->extradata, 1024);
  1117. if (!(s->workaround_bugs & FF_BUG_MS))
  1118. mpeg4_encode_visual_object_header(s);
  1119. mpeg4_encode_vol_header(s, 0, 0);
  1120. // ff_mpeg4_stuffing(&s->pb); ?
  1121. flush_put_bits(&s->pb);
  1122. s->avctx->extradata_size = (put_bits_count(&s->pb) + 7) >> 3;
  1123. }
  1124. return 0;
  1125. }
  1126. void ff_mpeg4_init_partitions(MpegEncContext *s)
  1127. {
  1128. uint8_t *start = put_bits_ptr(&s->pb);
  1129. uint8_t *end = s->pb.buf_end;
  1130. int size = end - start;
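  /* Split the remaining output buffer into three pieces on 4-byte boundaries:
  * s->pb keeps roughly the first third, s->tex_pb gets the middle part and
  * s->pb2 the last part; ff_mpeg4_merge_partitions() later copies pb2 and
  * tex_pb back behind pb in that order. */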
  1131. int pb_size = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
  1132. int tex_size = (size - 2 * pb_size) & (~3);
  1133. set_put_bits_buffer_size(&s->pb, pb_size);
  1134. init_put_bits(&s->tex_pb, start + pb_size, tex_size);
  1135. init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
  1136. }
  1137. void ff_mpeg4_merge_partitions(MpegEncContext *s)
  1138. {
  1139. const int pb2_len = put_bits_count(&s->pb2);
  1140. const int tex_pb_len = put_bits_count(&s->tex_pb);
  1141. const int bits = put_bits_count(&s->pb);
  1142. if (s->pict_type == AV_PICTURE_TYPE_I) {
  1143. put_bits(&s->pb, 19, DC_MARKER);
  1144. s->misc_bits += 19 + pb2_len + bits - s->last_bits;
  1145. s->i_tex_bits += tex_pb_len;
  1146. } else {
  1147. put_bits(&s->pb, 17, MOTION_MARKER);
  1148. s->misc_bits += 17 + pb2_len;
  1149. s->mv_bits += bits - s->last_bits;
  1150. s->p_tex_bits += tex_pb_len;
  1151. }
  1152. flush_put_bits(&s->pb2);
  1153. flush_put_bits(&s->tex_pb);
  1154. set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
  1155. avpriv_copy_bits(&s->pb, s->pb2.buf, pb2_len);
  1156. avpriv_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
  1157. s->last_bits = put_bits_count(&s->pb);
  1158. }
  1159. void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
  1160. {
  1161. int mb_num_bits = av_log2(s->mb_num - 1) + 1;
  1162. put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0);
  1163. put_bits(&s->pb, 1, 1);
  1164. put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y * s->mb_width);
  1165. put_bits(&s->pb, s->quant_precision, s->qscale);
  1166. put_bits(&s->pb, 1, 0); /* no HEC */
  1167. }
  1168. #define OFFSET(x) offsetof(MpegEncContext, x)
  1169. #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
  1170. static const AVOption options[] = {
  1171. { "data_partitioning", "Use data partitioning.", OFFSET(data_partitioning), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
  1172. { "alternate_scan", "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
  1173. FF_MPV_COMMON_OPTS
  1174. { NULL },
  1175. };
  1176. static const AVClass mpeg4enc_class = {
  1177. .class_name = "MPEG4 encoder",
  1178. .item_name = av_default_item_name,
  1179. .option = options,
  1180. .version = LIBAVUTIL_VERSION_INT,
  1181. };
  1182. AVCodec ff_mpeg4_encoder = {
  1183. .name = "mpeg4",
  1184. .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
  1185. .type = AVMEDIA_TYPE_VIDEO,
  1186. .id = AV_CODEC_ID_MPEG4,
  1187. .priv_data_size = sizeof(MpegEncContext),
  1188. .init = encode_init,
  1189. .encode2 = ff_mpv_encode_picture,
  1190. .close = ff_mpv_encode_end,
  1191. .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
  1192. .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
  1193. .priv_class = &mpeg4enc_class,
  1194. };