/*
 * MPEG4 encoder.
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "mpegvideo.h"
#include "h263.h"
#include "mpeg4video.h"

/* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
 * differences in mpeg4. Unified in the sense that the specification specifies
 * this encoding in several steps. */
static uint8_t  uni_DCtab_lum_len[512];
static uint8_t  uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];

/* Unified encoding tables for run length encoding of coefficients.
 * Unified in the sense that the specification specifies the encoding in several steps. */
static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];

//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
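/* The index packs (last, run, level + 64): 'last' selects the upper or lower
 * half of the 2 * 64 * 128 entry tables above, 'run' picks a row of 128
 * entries and the 64-biased level picks the column. */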
/* mpeg4
 * inter
 * max level: 24/6
 * max run: 53/63
 *
 * intra
 * max level: 53/16
 * max run: 29/41
 */

/**
 * Return the number of bits that encoding the 8x8 block in block would need.
 * @param[in] block_last_index last index in scantable order that refers to a non zero element in block.
 */
static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
                                 int block_last_index, uint8_t scantable[64])
{
    int last = 0;
    int j;
    int rate = 0;

    for (j = 1; j <= block_last_index; j++) {
        const int index = scantable[j];
        int level = block[index];
        if (level) {
            level += 64;
            if ((level & (~127)) == 0) {
                if (j < block_last_index)
                    rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
                else
                    rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
            } else
                rate += s->ac_esc_length;

            last = j;
        }
    }

    return rate;
}

/**
 * Restore the ac coefficients in block that have been changed by decide_ac_pred().
 * This function also restores s->block_last_index.
 * @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
                                     const int dir[6], uint8_t *st[6],
                                     const int zigzag_last_index[6])
{
    int i, n;
    memcpy(s->block_last_index, zigzag_last_index, sizeof(int) * 6);

    for (n = 0; n < 6; n++) {
        int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

        st[n] = s->intra_scantable.permutated;
        if (dir[n]) {
            /* top prediction */
            for (i = 1; i < 8; i++)
                block[n][s->dsp.idct_permutation[i]] = ac_val[i + 8];
        } else {
            /* left prediction */
            for (i = 1; i < 8; i++)
                block[n][s->dsp.idct_permutation[i << 3]] = ac_val[i];
        }
    }
}

/**
 * Return the optimal value (0 or 1) for the ac_pred element for the given MB in mpeg4.
 * This function will also update s->block_last_index and s->ac_val.
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
                                 const int dir[6], uint8_t *st[6],
                                 int zigzag_last_index[6])
{
    int score = 0;
    int i, n;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);
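    /* For each block, subtract the cost of coding the coefficients as they are
     * and add the cost after AC prediction; a negative total means prediction
     * saves bits, so 1 is returned and the predicted coefficients are kept. */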
    for (n = 0; n < 6; n++) {
        int16_t *ac_val, *ac_val1;

        score -= get_block_rate(s, block[n], s->block_last_index[n],
                                s->intra_scantable.permutated);

        ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val1 = ac_val;
        if (dir[n]) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
            /* top prediction */
            ac_val -= s->block_wrap[n] * 16;
            if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->dsp.idct_permutation[i]];
                    block[n][s->dsp.idct_permutation[i]] = level - ac_val[i + 8];
                    ac_val1[i]     = block[n][s->dsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->dsp.idct_permutation[i]];
                    block[n][s->dsp.idct_permutation[i]] = level - ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = block[n][s->dsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            }
            st[n] = s->intra_h_scantable.permutated;
        } else {
            const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
            /* left prediction */
            ac_val -= 16;
            if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->dsp.idct_permutation[i << 3]];
                    block[n][s->dsp.idct_permutation[i << 3]] = level - ac_val[i];
                    ac_val1[i]     = level;
                    ac_val1[i + 8] = block[n][s->dsp.idct_permutation[i]];
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->dsp.idct_permutation[i << 3]];
                    block[n][s->dsp.idct_permutation[i << 3]] = level - ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = level;
                    ac_val1[i + 8] = block[n][s->dsp.idct_permutation[i]];
                }
            }
            st[n] = s->intra_v_scantable.permutated;
        }

        for (i = 63; i > 0; i--)  // FIXME optimize
            if (block[n][st[n][i]])
                break;
        s->block_last_index[n] = i;

        score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
    }

    if (score < 0) {
        return 1;
    } else {
        restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
        return 0;
    }
}

/**
 * modify mb_type & qscale so that encoding is actually possible in mpeg4
 */
void ff_clean_mpeg4_qscales(MpegEncContext *s)
{
    int i;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    ff_clean_h263_qscales(s);

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        int odd = 0;
        /* ok, come on, this isn't funny anymore, there's more code for
         * handling this mpeg4 mess than for the actual adaptive quantization */
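        /* In a B-VOP the per-MB quantizer can only change via dbquant, which
         * is restricted to steps of -2/0/+2 (see the dquant asserts below), so
         * every MB in the frame must use a qscale of the same parity; pick the
         * majority parity and round the remaining qscales up. */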
        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            odd += qscale_table[mb_xy] & 1;
        }

        if (2 * odd > s->mb_num)
            odd = 1;
        else
            odd = 0;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if ((qscale_table[mb_xy] & 1) != odd)
                qscale_table[mb_xy]++;
            if (qscale_table[mb_xy] > 31)
                qscale_table[mb_xy] = 31;
        }

        for (i = 1; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i - 1]] &&
                (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
                s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
            }
        }
    }
}

/**
 * Encode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
{
#if 1
    /* DC will overflow if level is outside the [-255,255] range. */
    level += 256;
    if (n < 4) {
        /* luminance */
        put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
    } else {
        /* chrominance */
        put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
    }
#else
    int size, v;
    /* find number of bits */
    size = 0;
    v = abs(level);
    while (v) {
        v >>= 1;
        size++;
    }

    if (n < 4) {
        /* luminance */
        put_bits(&s->pb, ff_mpeg4_DCtab_lum[size][1], ff_mpeg4_DCtab_lum[size][0]);
    } else {
        /* chrominance */
        put_bits(&s->pb, ff_mpeg4_DCtab_chrom[size][1], ff_mpeg4_DCtab_chrom[size][0]);
    }

    /* encode remaining bits */
    if (size > 0) {
        if (level < 0)
            level = (-level) ^ ((1 << size) - 1);
        put_bits(&s->pb, size, level);
        if (size > 8)
            put_bits(&s->pb, 1, 1);
    }
#endif
}

static inline int mpeg4_get_dc_length(int level, int n)
{
    if (n < 4)
        return uni_DCtab_lum_len[level + 256];
    else
        return uni_DCtab_chrom_len[level + 256];
}

/**
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_block(MpegEncContext *s,
                                      int16_t *block, int n, int intra_dc,
                                      uint8_t *scan_table, PutBitContext *dc_pb,
                                      PutBitContext *ac_pb)
{
    int i, last_non_zero;
    uint32_t *bits_tab;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* mpeg4 based DC predictor */
        mpeg4_encode_dc(dc_pb, intra_dc, n);
        if (last_index < 1)
            return;
        i = 1;
        bits_tab = uni_mpeg4_intra_rl_bits;
        len_tab  = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return;
        i = 0;
        bits_tab = uni_mpeg4_inter_rl_bits;
        len_tab  = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                put_bits(ac_pb, len_tab[index], bits_tab[index]);
            } else {  // ESC3
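                /* 30-bit escape: 7-bit escape code, 2-bit mode "11", last flag,
                 * 6-bit run, marker, 12-bit level, marker. */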
                put_bits(ac_pb,
                         7 + 2 + 1 + 6 + 1 + 12 + 1,
                         (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
                         (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
            }
            last_non_zero = i;
        }
    }

    /* if (i <= last_index) */ {
        int level = block[scan_table[i]];
        int run   = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            put_bits(ac_pb, len_tab[index], bits_tab[index]);
        } else {  // ESC3
            put_bits(ac_pb,
                     7 + 2 + 1 + 6 + 1 + 12 + 1,
                     (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
                     (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
        }
    }
}

static int mpeg4_get_block_length(MpegEncContext *s,
                                  int16_t *block, int n,
                                  int intra_dc, uint8_t *scan_table)
{
    int i, last_non_zero;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];
    int len = 0;

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* mpeg4 based DC predictor */
        len += mpeg4_get_dc_length(intra_dc, n);
        if (last_index < 1)
            return len;
        i = 1;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return 0;
        i = 0;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                len += len_tab[index];
            } else {  // ESC3
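                /* same 30-bit ESC3 layout as written in mpeg4_encode_block() */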
                len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
            }
            last_non_zero = i;
        }
    }

    /* if (i <= last_index) */ {
        int level = block[scan_table[i]];
        int run   = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            len += len_tab[index];
        } else {  // ESC3
            len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
        }
    }

    return len;
}

static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
                                       int intra_dc[6], uint8_t **scan_table,
                                       PutBitContext *dc_pb,
                                       PutBitContext *ac_pb)
{
    int i;

    if (scan_table) {
        if (s->flags2 & CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i,
                                                     intra_dc[i], scan_table[i]));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i,
                                   intra_dc[i], scan_table[i], dc_pb, ac_pb);
        }
    } else {
        if (s->flags2 & CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i, 0,
                                                     s->intra_scantable.permutated));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i, 0,
                                   s->intra_scantable.permutated, dc_pb, ac_pb);
        }
    }
}

static inline int get_b_cbp(MpegEncContext *s, int16_t block[6][64],
                            int motion_x, int motion_y, int mb_type)
{
    int cbp = 0, i;

    if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        int score = 0;
        const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

        for (i = 0; i < 6; i++) {
            if (s->coded_score[i] < 0) {
                score += s->coded_score[i];
                cbp   |= 1 << (5 - i);
            }
        }

        if (cbp) {
            int zero_score = -6;
            if ((motion_x | motion_y | s->dquant | mb_type) == 0)
                zero_score -= 4;  // 2 * MV + mb_type + cbp bit

            zero_score *= lambda;
            if (zero_score <= score)
                cbp = 0;
        }

        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
                s->block_last_index[i] = -1;
                s->dsp.clear_block(s->block[i]);
            }
        }
    } else {
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }
    }
    return cbp;
}

// FIXME this is duplicated to h263.c
static const int dquant_code[5] = { 1, 0, 9, 2, 3 };

void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
                        int motion_x, int motion_y)
{
    int cbpc, cbpy, pred_x, pred_y;
    PutBitContext *const pb2    = s->data_partitioning ? &s->pb2 : &s->pb;
    PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
    PutBitContext *const dc_pb  = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
    const int interleaved_stats = (s->flags & CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;

    if (!s->mb_intra) {
        int i, cbp;

        if (s->pict_type == AV_PICTURE_TYPE_B) {
            /* convert from mv_dir to type */
            static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
            int mb_type = mb_type_table[s->mv_dir];

            if (s->mb_x == 0) {
                for (i = 0; i < 2; i++)
                    s->last_mv[i][0][0] =
                    s->last_mv[i][0][1] =
                    s->last_mv[i][1][0] =
                    s->last_mv[i][1][1] = 0;
            }

            av_assert2(s->dquant >= -2 && s->dquant <= 2);
            av_assert2((s->dquant & 1) == 0);
            av_assert2(mb_type >= 0);

            /* nothing to do if this MB was skipped in the next P Frame */
            if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) {  // FIXME avoid DCT & ...
                s->skip_count++;
                s->mv[0][0][0] =
                s->mv[0][0][1] =
                s->mv[1][0][0] =
                s->mv[1][0][1] = 0;
                s->mv_dir  = MV_DIR_FORWARD;  // doesn't matter
                s->qscale -= s->dquant;
                // s->mb_skipped = 1;

                return;
            }

            cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);

            if ((cbp | motion_x | motion_y | mb_type) == 0) {
                /* direct MB with MV={0,0} */
                av_assert2(s->dquant == 0);

                put_bits(&s->pb, 1, 1);  /* mb not coded modb1=1 */

                if (interleaved_stats) {
                    s->misc_bits++;
                    s->last_bits++;
                }
                s->skip_count++;
                return;
            }

            put_bits(&s->pb, 1, 0);            /* mb coded modb1=0 */
            put_bits(&s->pb, 1, cbp ? 0 : 1);  /* modb2 */  // FIXME merge
            put_bits(&s->pb, mb_type + 1, 1);  // this table is so simple that we don't need it :)
            if (cbp)
                put_bits(&s->pb, 6, cbp);

            if (cbp && mb_type) {
                if (s->dquant)
                    put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
                else
                    put_bits(&s->pb, 1, 0);
            } else
                s->qscale -= s->dquant;

            if (!s->progressive_sequence) {
                if (cbp)
                    put_bits(&s->pb, 1, s->interlaced_dct);
                if (mb_type)  // not direct mode
                    put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
            }

            if (interleaved_stats)
                s->misc_bits += get_bits_diff(s);

            if (!mb_type) {
                av_assert2(s->mv_dir & MV_DIRECT);
                ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
                s->b_count++;
                s->f_count++;
            } else {
                av_assert2(mb_type > 0 && mb_type < 4);
                if (s->mv_type != MV_TYPE_FIELD) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[0][0][0] - s->last_mv[0][0][0],
                                                     s->mv[0][0][1] - s->last_mv[0][0][1],
                                                     s->f_code);
                        s->last_mv[0][0][0] =
                        s->last_mv[0][1][0] = s->mv[0][0][0];
                        s->last_mv[0][0][1] =
                        s->last_mv[0][1][1] = s->mv[0][0][1];
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[1][0][0] - s->last_mv[1][0][0],
                                                     s->mv[1][0][1] - s->last_mv[1][0][1],
                                                     s->b_code);
                        s->last_mv[1][0][0] =
                        s->last_mv[1][1][0] = s->mv[1][0][0];
                        s->last_mv[1][0][1] =
                        s->last_mv[1][1][1] = s->mv[1][0][1];
                        s->b_count++;
                    }
                } else {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        put_bits(&s->pb, 1, s->field_select[0][0]);
                        put_bits(&s->pb, 1, s->field_select[0][1]);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        put_bits(&s->pb, 1, s->field_select[1][0]);
                        put_bits(&s->pb, 1, s->field_select[1][1]);
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[0][i][0] - s->last_mv[0][i][0],
                                                         s->mv[0][i][1] - s->last_mv[0][i][1] / 2,
                                                         s->f_code);
                            s->last_mv[0][i][0] = s->mv[0][i][0];
                            s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
                        }
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[1][i][0] - s->last_mv[1][i][0],
                                                         s->mv[1][i][1] - s->last_mv[1][i][1] / 2,
                                                         s->b_code);
                            s->last_mv[1][i][0] = s->mv[1][i][0];
                            s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
                        }
                        s->b_count++;
                    }
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);
        } else { /* s->pict_type != AV_PICTURE_TYPE_B */
            cbp = get_p_cbp(s, block, motion_x, motion_y);

            if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
                s->mv_type == MV_TYPE_16X16) {
                /* check if the B frames can skip it too, as we must skip it
                 * if we skip here why didn't they just compress
                 * the skip-mb bits instead of reusing them ?! */
                if (s->max_b_frames > 0) {
                    int i;
                    int x, y, offset;
                    uint8_t *p_pic;

                    x = s->mb_x * 16;
                    y = s->mb_y * 16;

                    offset = x + y * s->linesize;
                    p_pic  = s->new_picture.f.data[0] + offset;

                    s->mb_skipped = 1;
                    for (i = 0; i < s->max_b_frames; i++) {
                        uint8_t *b_pic;
                        int diff;
                        Picture *pic = s->reordered_input_picture[i + 1];

                        if (!pic || pic->f.pict_type != AV_PICTURE_TYPE_B)
                            break;

                        b_pic = pic->f.data[0] + offset;
                        if (!pic->shared)
                            b_pic += INPLACE_OFFSET;

                        if (x + 16 > s->width || y + 16 > s->height) {
                            int x1, y1;
                            int xe = FFMIN(16, s->width - x);
                            int ye = FFMIN(16, s->height - y);
                            diff = 0;
                            for (y1 = 0; y1 < ye; y1++) {
                                for (x1 = 0; x1 < xe; x1++) {
                                    diff += FFABS(p_pic[x1 + y1 * s->linesize] - b_pic[x1 + y1 * s->linesize]);
                                }
                            }
                            diff = diff * 256 / (xe * ye);
                        } else {
                            diff = s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
                        }
                        if (diff > s->qscale * 70) {  // FIXME check that 70 is optimal
                            s->mb_skipped = 0;
                            break;
                        }
                    }
                } else
                    s->mb_skipped = 1;

                if (s->mb_skipped == 1) {
                    /* skip macroblock */
                    put_bits(&s->pb, 1, 1);

                    if (interleaved_stats) {
                        s->misc_bits++;
                        s->last_bits++;
                    }
                    s->skip_count++;

                    return;
                }
            }

            put_bits(&s->pb, 1, 0);  /* mb coded */
            cbpc  = cbp & 3;
            cbpy  = cbp >> 2;
            cbpy ^= 0xf;
            if (s->mv_type == MV_TYPE_16X16) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                if (!s->progressive_sequence) {
                    if (cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                    put_bits(pb2, 1, 0);
                }

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x16 mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

                ff_h263_encode_motion_vector(s,
                                             motion_x - pred_x,
                                             motion_y - pred_y,
                                             s->f_code);
            } else if (s->mv_type == MV_TYPE_FIELD) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                av_assert2(!s->progressive_sequence);
                if (cbp)
                    put_bits(pb2, 1, s->interlaced_dct);
                put_bits(pb2, 1, 1);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x8 interlaced mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                pred_y /= 2;

                put_bits(&s->pb, 1, s->field_select[0][0]);
                put_bits(&s->pb, 1, s->field_select[0][1]);

                ff_h263_encode_motion_vector(s,
                                             s->mv[0][0][0] - pred_x,
                                             s->mv[0][0][1] - pred_y,
                                             s->f_code);
                ff_h263_encode_motion_vector(s,
                                             s->mv[0][1][0] - pred_x,
                                             s->mv[0][1][1] - pred_y,
                                             s->f_code);
            } else {
                av_assert2(s->mv_type == MV_TYPE_8X8);
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc + 16],
                         ff_h263_inter_MCBPC_code[cbpc + 16]);
                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);

                if (!s->progressive_sequence && cbp)
                    put_bits(pb2, 1, s->interlaced_dct);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                for (i = 0; i < 4; i++) {
                    /* motion vectors: 8x8 mode */
                    ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

                    ff_h263_encode_motion_vector(s,
                                                 s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
                                                 s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
                                                 s->f_code);
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);

            s->f_count++;
        }
    } else {
        int cbp;
        int dc_diff[6];  // dc values with the dc prediction subtracted
        int dir[6];      // prediction direction
        int zigzag_last_index[6];
        uint8_t *scan_table[6];
        int i;

        for (i = 0; i < 6; i++)
            dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);

        if (s->flags & CODEC_FLAG_AC_PRED) {
            s->ac_pred = decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
        } else {
            for (i = 0; i < 6; i++)
                scan_table[i] = s->intra_scantable.permutated;
        }

        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++)
            if (s->block_last_index[i] >= 1)
                cbp |= 1 << (5 - i);

        cbpc = cbp & 3;
        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if (s->dquant)
                cbpc += 4;
            put_bits(&s->pb,
                     ff_h263_intra_MCBPC_bits[cbpc],
                     ff_h263_intra_MCBPC_code[cbpc]);
        } else {
            if (s->dquant)
                cbpc += 8;
            put_bits(&s->pb, 1, 0);  /* mb coded */
            put_bits(&s->pb,
                     ff_h263_inter_MCBPC_bits[cbpc + 4],
                     ff_h263_inter_MCBPC_code[cbpc + 4]);
        }
        put_bits(pb2, 1, s->ac_pred);
        cbpy = cbp >> 2;
        put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
        if (s->dquant)
            put_bits(dc_pb, 2, dquant_code[s->dquant + 2]);

        if (!s->progressive_sequence)
            put_bits(dc_pb, 1, s->interlaced_dct);

        if (interleaved_stats)
            s->misc_bits += get_bits_diff(s);

        mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);

        if (interleaved_stats)
            s->i_tex_bits += get_bits_diff(s);
        s->i_count++;

        /* restore ac coeffs & last_index stuff
         * if we messed them up with the prediction */
        if (s->ac_pred)
            restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
    }
}

/**
 * add mpeg4 stuffing bits (01...1)
 */
void ff_mpeg4_stuffing(PutBitContext *pbc)
{
    int length;
    put_bits(pbc, 1, 0);
    length = (-put_bits_count(pbc)) & 7;
    if (length)
        put_bits(pbc, length, (1 << length) - 1);
}

/* must be called before writing the header */
void ff_set_mpeg4_time(MpegEncContext *s)
{
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        ff_mpeg4_init_direct_mv(s);
    } else {
        s->last_time_base = s->time_base;
        s->time_base      = FFUDIV(s->time, s->avctx->time_base.den);
    }
}

static void mpeg4_encode_gop_header(MpegEncContext *s)
{
    int hours, minutes, seconds;
    int64_t time;

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, GOP_STARTCODE);

    time = s->current_picture_ptr->f.pts;
    if (s->reordered_input_picture[1])
        time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
    time = time * s->avctx->time_base.num;
    s->last_time_base = FFUDIV(time, s->avctx->time_base.den);

    seconds = FFUDIV(time, s->avctx->time_base.den);
    minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
    hours   = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
    hours   = FFUMOD(hours, 24);
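    /* time_code: 5-bit hours, 6-bit minutes, marker, 6-bit seconds */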
    put_bits(&s->pb, 5, hours);
    put_bits(&s->pb, 6, minutes);
    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 6, seconds);

    put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP));
    put_bits(&s->pb, 1, 0);  // broken link == NO

    ff_mpeg4_stuffing(&s->pb);
}

static void mpeg4_encode_visual_object_header(MpegEncContext *s)
{
    int profile_and_level_indication;
    int vo_ver_id;

    if (s->avctx->profile != FF_PROFILE_UNKNOWN) {
        profile_and_level_indication = s->avctx->profile << 4;
    } else if (s->max_b_frames || s->quarter_sample) {
        profile_and_level_indication = 0xF0;  // adv simple
    } else {
        profile_and_level_indication = 0x00;  // simple
    }

    if (s->avctx->level != FF_LEVEL_UNKNOWN)
        profile_and_level_indication |= s->avctx->level;
    else
        profile_and_level_indication |= 1;  // level 1

    if (profile_and_level_indication >> 4 == 0xF)
        vo_ver_id = 5;
    else
        vo_ver_id = 1;

    // FIXME levels

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VOS_STARTCODE);

    put_bits(&s->pb, 8, profile_and_level_indication);

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE);

    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 4, vo_ver_id);
    put_bits(&s->pb, 3, 1);  // priority

    put_bits(&s->pb, 4, 1);  // visual obj type == video obj

    put_bits(&s->pb, 1, 0);  // video signal type == no clue // FIXME

    ff_mpeg4_stuffing(&s->pb);
}

static void mpeg4_encode_vol_header(MpegEncContext *s,
                                    int vo_number,
                                    int vol_number)
{
    int vo_ver_id;

    if (!CONFIG_MPEG4_ENCODER)
        return;

    if (s->max_b_frames || s->quarter_sample) {
        vo_ver_id  = 5;
        s->vo_type = ADV_SIMPLE_VO_TYPE;
    } else {
        vo_ver_id  = 1;
        s->vo_type = SIMPLE_VO_TYPE;
    }

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x100 + vo_number);   /* video obj */
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x120 + vol_number);  /* video obj layer */

    put_bits(&s->pb, 1, 0);           /* random access vol */
    put_bits(&s->pb, 8, s->vo_type);  /* video obj type indication */
    if (s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);          /* is obj layer id= no */
    } else {
        put_bits(&s->pb, 1, 1);          /* is obj layer id= yes */
        put_bits(&s->pb, 4, vo_ver_id);  /* is obj layer ver id */
        put_bits(&s->pb, 3, 1);          /* is obj layer priority */
    }

    s->aspect_ratio_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

    put_bits(&s->pb, 4, s->aspect_ratio_info);  /* aspect ratio info */
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
        av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
                  s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den, 255);
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
    }

    if (s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);  /* vol control parameters= no @@@ */
    } else {
        put_bits(&s->pb, 1, 1);             /* vol control parameters= yes */
        put_bits(&s->pb, 2, 1);             /* chroma format YUV 420/YV12 */
        put_bits(&s->pb, 1, s->low_delay);
        put_bits(&s->pb, 1, 0);             /* vbv parameters= no */
    }

    put_bits(&s->pb, 2, RECT_SHAPE);  /* vol shape= rectangle */
    put_bits(&s->pb, 1, 1);           /* marker bit */

    put_bits(&s->pb, 16, s->avctx->time_base.den);
    if (s->time_increment_bits < 1)
        s->time_increment_bits = 1;
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 1, 0);           /* fixed vop rate=no */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 13, s->width);   /* vol width */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 13, s->height);  /* vol height */
    put_bits(&s->pb, 1, 1);           /* marker bit */
    put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
    put_bits(&s->pb, 1, 1);           /* obmc disable */
    if (vo_ver_id == 1)
        put_bits(&s->pb, 1, 0);       /* sprite enable */
    else
        put_bits(&s->pb, 2, 0);       /* sprite enable */

    put_bits(&s->pb, 1, 0);              /* not 8 bit == false */
    put_bits(&s->pb, 1, s->mpeg_quant);  /* quant type= (0=h263 style) */
    if (s->mpeg_quant) {
        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
    }

    if (vo_ver_id != 1)
        put_bits(&s->pb, 1, s->quarter_sample);
    put_bits(&s->pb, 1, 1);  /* complexity estimation disable */
    s->resync_marker = s->rtp_mode;
    put_bits(&s->pb, 1, s->resync_marker ? 0 : 1);  /* resync marker disable */
    put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
    if (s->data_partitioning)
        put_bits(&s->pb, 1, 0);  /* no rvlc */

    if (vo_ver_id != 1) {
        put_bits(&s->pb, 1, 0);  /* newpred */
        put_bits(&s->pb, 1, 0);  /* reduced res vop */
    }
    put_bits(&s->pb, 1, 0);  /* scalability */

    ff_mpeg4_stuffing(&s->pb);

    /* user data */
    if (!(s->flags & CODEC_FLAG_BITEXACT)) {
        put_bits(&s->pb, 16, 0);
        put_bits(&s->pb, 16, 0x1B2);  /* user_data */
        avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
    }
}

/* write mpeg4 VOP header */
void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
{
    int time_incr;
    int time_div, time_mod;

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        if (!(s->flags & CODEC_FLAG_GLOBAL_HEADER)) {
            if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT)  // HACK, the reference sw is buggy
                mpeg4_encode_visual_object_header(s);
            if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0)  // HACK, the reference sw is buggy
                mpeg4_encode_vol_header(s, 0, 0);
        }
        if (!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_gop_header(s);
    }

    s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B;

    put_bits(&s->pb, 16, 0);                 /* vop header */
    put_bits(&s->pb, 16, VOP_STARTCODE);     /* vop header */
    put_bits(&s->pb, 2, s->pict_type - 1);   /* pict type: I = 0, P = 1 */

    time_div  = FFUDIV(s->time, s->avctx->time_base.den);
    time_mod  = FFUMOD(s->time, s->avctx->time_base.den);
    time_incr = time_div - s->last_time_base;

    av_assert0(time_incr >= 0);
    while (time_incr--)
        put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, 1, 0);

    put_bits(&s->pb, 1, 1);                               /* marker */
    put_bits(&s->pb, s->time_increment_bits, time_mod);   /* time increment */
    put_bits(&s->pb, 1, 1);                               /* marker */
    put_bits(&s->pb, 1, 1);                               /* vop coded */
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        put_bits(&s->pb, 1, s->no_rounding);  /* rounding type */
    }
    put_bits(&s->pb, 3, 0);  /* intra dc VLC threshold */
    if (!s->progressive_sequence) {
        put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
        put_bits(&s->pb, 1, s->alternate_scan);
    }
    // FIXME sprite stuff

    put_bits(&s->pb, 5, s->qscale);

    if (s->pict_type != AV_PICTURE_TYPE_I)
        put_bits(&s->pb, 3, s->f_code);  /* fcode_for */
    if (s->pict_type == AV_PICTURE_TYPE_B)
        put_bits(&s->pb, 3, s->b_code);  /* fcode_back */
}

static av_cold void init_uni_dc_tab(void)
{
    int level, uni_code, uni_len;

    for (level = -256; level < 256; level++) {
        int size, v, l;
        /* find number of bits */
        size = 0;
        v = abs(level);
        while (v) {
            v >>= 1;
            size++;
        }

        if (level < 0)
            l = (-level) ^ ((1 << size) - 1);
        else
            l = level;

        /* luminance */
        uni_code = ff_mpeg4_DCtab_lum[size][0];
        uni_len  = ff_mpeg4_DCtab_lum[size][1];

        if (size > 0) {
            uni_code <<= size;
            uni_code  |= l;
            uni_len   += size;
            if (size > 8) {
                uni_code <<= 1;
                uni_code  |= 1;
                uni_len++;
            }
        }
        uni_DCtab_lum_bits[level + 256] = uni_code;
        uni_DCtab_lum_len[level + 256]  = uni_len;

        /* chrominance */
        uni_code = ff_mpeg4_DCtab_chrom[size][0];
        uni_len  = ff_mpeg4_DCtab_chrom[size][1];

        if (size > 0) {
            uni_code <<= size;
            uni_code  |= l;
            uni_len   += size;
            if (size > 8) {
                uni_code <<= 1;
                uni_code  |= 1;
                uni_len++;
            }
        }
        uni_DCtab_chrom_bits[level + 256] = uni_code;
        uni_DCtab_chrom_len[level + 256]  = uni_len;
    }
}

static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
                                          uint8_t *len_tab)
{
    int slevel, run, last;

    av_assert0(MAX_LEVEL >= 64);
    av_assert0(MAX_RUN >= 63);

    for (slevel = -64; slevel < 64; slevel++) {
        if (slevel == 0)
            continue;
        for (run = 0; run < 64; run++) {
            for (last = 0; last <= 1; last++) {
                const int index = UNI_MPEG4_ENC_INDEX(last, run, slevel + 64);
                int level = slevel < 0 ? -slevel : slevel;
                int sign  = slevel < 0 ? 1 : 0;
                int bits, len, code;
                int level1, run1;

                len_tab[index] = 100;
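                /* Start from an impossibly long code (100 bits) so that the
                 * shortest of the candidate encodings below always replaces it. */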
                /* ESC0 */
                code = get_rl_index(rl, last, run, level);
                bits = rl->table_vlc[code][0];
                len  = rl->table_vlc[code][1];
                bits = bits * 2 + sign;
                len++;

                if (code != rl->n && len < len_tab[index]) {
                    bits_tab[index] = bits;
                    len_tab[index]  = len;
                }
                /* ESC1 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 2;
                len++;  // esc1
                level1 = level - rl->max_level[last][run];
                if (level1 > 0) {
                    code   = get_rl_index(rl, last, run, level1);
                    bits <<= rl->table_vlc[code][1];
                    len   += rl->table_vlc[code][1];
                    bits  += rl->table_vlc[code][0];
                    bits   = bits * 2 + sign;
                    len++;

                    if (code != rl->n && len < len_tab[index]) {
                        bits_tab[index] = bits;
                        len_tab[index]  = len;
                    }
                }
                /* ESC2 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 4 + 2;
                len += 2;  // esc2
                run1 = run - rl->max_run[last][level] - 1;
                if (run1 >= 0) {
                    code   = get_rl_index(rl, last, run1, level);
                    bits <<= rl->table_vlc[code][1];
                    len   += rl->table_vlc[code][1];
                    bits  += rl->table_vlc[code][0];
                    bits   = bits * 2 + sign;
                    len++;

                    if (code != rl->n && len < len_tab[index]) {
                        bits_tab[index] = bits;
                        len_tab[index]  = len;
                    }
                }
                /* ESC3 */
                bits = rl->table_vlc[rl->n][0];
                len  = rl->table_vlc[rl->n][1];
                bits = bits * 4 + 3;
                len += 2;  // esc3
                bits = bits * 2 + last;
                len++;
                bits = bits * 64 + run;
                len += 6;
                bits = bits * 2 + 1;
                len++;  // marker
                bits = bits * 4096 + (slevel & 0xfff);
                len += 12;
                bits = bits * 2 + 1;
                len++;  // marker

                if (len < len_tab[index]) {
                    bits_tab[index] = bits;
                    len_tab[index]  = len;
                }
            }
        }
    }
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;
    static int done = 0;

    if (avctx->width >= (1 << 13) || avctx->height >= (1 << 13)) {
        av_log(avctx, AV_LOG_ERROR, "dimensions too large for MPEG-4\n");
        return AVERROR(EINVAL);
    }

    if ((ret = ff_MPV_encode_init(avctx)) < 0)
        return ret;

    if (!done) {
        done = 1;

        init_uni_dc_tab();

        ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);

        init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
        init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
    }

    s->min_qcoeff               = -2048;
    s->max_qcoeff               = 2047;
    s->intra_ac_vlc_length      = uni_mpeg4_intra_rl_len;
    s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64;
    s->inter_ac_vlc_length      = uni_mpeg4_inter_rl_len;
    s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64;
    s->luma_dc_vlc_length       = uni_DCtab_lum_len;
    s->ac_esc_length            = 7 + 2 + 1 + 6 + 1 + 12 + 1;
    s->y_dc_scale_table         = ff_mpeg4_y_dc_scale_table;
    s->c_dc_scale_table         = ff_mpeg4_c_dc_scale_table;

    if (s->flags & CODEC_FLAG_GLOBAL_HEADER) {
        s->avctx->extradata = av_malloc(1024);
        init_put_bits(&s->pb, s->avctx->extradata, 1024);

        if (!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_visual_object_header(s);
        mpeg4_encode_vol_header(s, 0, 0);

        // ff_mpeg4_stuffing(&s->pb); ?
        flush_put_bits(&s->pb);
        s->avctx->extradata_size = (put_bits_count(&s->pb) + 7) >> 3;
    }
    return 0;
}

void ff_mpeg4_init_partitions(MpegEncContext *s)
{
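    /* Carve the remaining output buffer into separate regions for the three
     * data partitions (s->pb, s->tex_pb, s->pb2); they are written
     * independently and concatenated in ff_mpeg4_merge_partitions(). */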
    uint8_t *start = put_bits_ptr(&s->pb);
    uint8_t *end   = s->pb.buf_end;
    int size       = end - start;
    int pb_size    = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
    int tex_size   = (size - 2 * pb_size) & (~3);

    set_put_bits_buffer_size(&s->pb, pb_size);
    init_put_bits(&s->tex_pb, start + pb_size, tex_size);
    init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
}

void ff_mpeg4_merge_partitions(MpegEncContext *s)
{
    const int pb2_len    = put_bits_count(&s->pb2);
    const int tex_pb_len = put_bits_count(&s->tex_pb);
    const int bits       = put_bits_count(&s->pb);

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        put_bits(&s->pb, 19, DC_MARKER);
        s->misc_bits  += 19 + pb2_len + bits - s->last_bits;
        s->i_tex_bits += tex_pb_len;
    } else {
        put_bits(&s->pb, 17, MOTION_MARKER);
        s->misc_bits  += 17 + pb2_len;
        s->mv_bits    += bits - s->last_bits;
        s->p_tex_bits += tex_pb_len;
    }

    flush_put_bits(&s->pb2);
    flush_put_bits(&s->tex_pb);

    set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
    avpriv_copy_bits(&s->pb, s->pb2.buf, pb2_len);
    avpriv_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
    s->last_bits = put_bits_count(&s->pb);
}

void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
{
    int mb_num_bits = av_log2(s->mb_num - 1) + 1;

    put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0);
    put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y * s->mb_width);
    put_bits(&s->pb, s->quant_precision, s->qscale);
    put_bits(&s->pb, 1, 0);  /* no HEC */
}

#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "data_partitioning", "Use data partitioning.",      OFFSET(data_partitioning), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "alternate_scan",    "Enable alternate scantable.", OFFSET(alternate_scan),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};

static const AVClass mpeg4enc_class = {
    .class_name = "MPEG4 encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_mpeg4_encoder = {
    .name           = "mpeg4",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
    .priv_class     = &mpeg4enc_class,
};