/*
 * MPEG4 encoder.
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "mpegvideo.h"
#include "h263.h"
#include "mpeg4video.h"
//The uni_DCtab_* tables below contain unified bits+length tables to encode DC
//differences in mpeg4. Unified in the sense that the specification specifies
//this encoding in several steps.
static uint8_t  uni_DCtab_lum_len[512];
static uint8_t  uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];

//unified encoding tables for run length encoding of coefficients
//unified in the sense that the specification specifies the encoding in several steps.
static uint32_t uni_mpeg4_intra_rl_bits[64*64*2*2];
static uint8_t  uni_mpeg4_intra_rl_len [64*64*2*2];
static uint32_t uni_mpeg4_inter_rl_bits[64*64*2*2];
static uint8_t  uni_mpeg4_inter_rl_len [64*64*2*2];

//#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128 + (run)*256 + (level))
//#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run) + (level)*64)
#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run)*128 + (level))
/* mpeg4
   inter
      max level: 24/6
      max run: 53/63
   intra
      max level: 53/16
      max run: 29/41
*/

/**
 * Return the number of bits that encoding the 8x8 block in block would need.
 * @param[in] block_last_index last index in scantable order that refers to a non zero element in block.
 */
static inline int get_block_rate(MpegEncContext * s, int16_t block[64], int block_last_index, uint8_t scantable[64]){
    int last=0;
    int j;
    int rate=0;

    for(j=1; j<=block_last_index; j++){
        const int index= scantable[j];
        int level= block[index];
        if(level){
            level+= 64;
            if((level&(~127)) == 0){
                if(j<block_last_index) rate+= s->intra_ac_vlc_length     [UNI_AC_ENC_INDEX(j-last-1, level)];
                else                   rate+= s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j-last-1, level)];
            }else
                rate += s->ac_esc_length;

            last= j;
        }
    }

    return rate;
}
/**
 * Restore the ac coefficients in block that have been changed by decide_ac_pred().
 * This function also restores s->block_last_index.
 * @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline void restore_ac_coeffs(MpegEncContext * s, int16_t block[6][64], const int dir[6], uint8_t *st[6], const int zigzag_last_index[6])
{
    int i, n;
    memcpy(s->block_last_index, zigzag_last_index, sizeof(int)*6);

    for(n=0; n<6; n++){
        int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

        st[n]= s->intra_scantable.permutated;
        if(dir[n]){
            /* top prediction */
            for(i=1; i<8; i++){
                block[n][s->dsp.idct_permutation[i   ]] = ac_val[i+8];
            }
        }else{
            /* left prediction */
            for(i=1; i<8; i++){
                block[n][s->dsp.idct_permutation[i<<3]]= ac_val[i  ];
            }
        }
    }
}
/**
 * Return the optimal value (0 or 1) for the ac_pred element for the given MB in mpeg4.
 * This function will also update s->block_last_index and s->ac_val.
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline int decide_ac_pred(MpegEncContext * s, int16_t block[6][64], const int dir[6], uint8_t *st[6], int zigzag_last_index[6])
{
    int score= 0;
    int i, n;
    int8_t * const qscale_table = s->current_picture.qscale_table;

    memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);

    for(n=0; n<6; n++){
        int16_t *ac_val, *ac_val1;

        score -= get_block_rate(s, block[n], s->block_last_index[n], s->intra_scantable.permutated);

        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val1= ac_val;
        if(dir[n]){
            const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride;
            /* top prediction */
            ac_val-= s->block_wrap[n]*16;
            if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
                /* same qscale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i   ]];
                    block[n][s->dsp.idct_permutation[i   ]] = level - ac_val[i+8];
                    ac_val1[i  ]= block[n][s->dsp.idct_permutation[i<<3]];
                    ac_val1[i+8]= level;
                }
            }else{
                /* different qscale, we must rescale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i   ]];
                    block[n][s->dsp.idct_permutation[i   ]] = level - ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
                    ac_val1[i  ]= block[n][s->dsp.idct_permutation[i<<3]];
                    ac_val1[i+8]= level;
                }
            }
            st[n]= s->intra_h_scantable.permutated;
        }else{
            const int xy= s->mb_x-1 + s->mb_y*s->mb_stride;
            /* left prediction */
            ac_val-= 16;
            if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
                /* same qscale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i<<3]];
                    block[n][s->dsp.idct_permutation[i<<3]]= level - ac_val[i];
                    ac_val1[i  ]= level;
                    ac_val1[i+8]= block[n][s->dsp.idct_permutation[i   ]];
                }
            }else{
                /* different qscale, we must rescale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i<<3]];
                    block[n][s->dsp.idct_permutation[i<<3]]= level - ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
                    ac_val1[i  ]= level;
                    ac_val1[i+8]= block[n][s->dsp.idct_permutation[i   ]];
                }
            }
            st[n]= s->intra_v_scantable.permutated;
        }

        for(i=63; i>0; i--) //FIXME optimize
            if(block[n][ st[n][i] ]) break;
        s->block_last_index[n]= i;

        score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
    }

    if(score < 0){
        return 1;
    }else{
        restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
        return 0;
    }
}
/**
 * modify mb_type & qscale so that encoding is actually possible in mpeg4
 */
void ff_clean_mpeg4_qscales(MpegEncContext *s){
    int i;
    int8_t * const qscale_table = s->current_picture.qscale_table;

    ff_clean_h263_qscales(s);

    if(s->pict_type== AV_PICTURE_TYPE_B){
        int odd=0;
        /* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */

        for(i=0; i<s->mb_num; i++){
            int mb_xy= s->mb_index2xy[i];
            odd += qscale_table[mb_xy]&1;
        }

        if(2*odd > s->mb_num) odd=1;
        else                  odd=0;

        for(i=0; i<s->mb_num; i++){
            int mb_xy= s->mb_index2xy[i];
            if((qscale_table[mb_xy]&1) != odd)
                qscale_table[mb_xy]++;
            if(qscale_table[mb_xy] > 31)
                qscale_table[mb_xy]= 31;
        }

        for(i=1; i<s->mb_num; i++){
            int mb_xy= s->mb_index2xy[i];
            if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_DIRECT)){
                s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_BIDIR;
            }
        }
    }
}
/**
 * Encode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n)
{
#if 1
    /* DC will overflow if level is outside the [-255,255] range. */
    level+=256;
    if (n < 4) {
        /* luminance */
        put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
    } else {
        /* chrominance */
        put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
    }
#else
    int size, v;
    /* find number of bits */
    size = 0;
    v = abs(level);
    while (v) {
        v >>= 1;
        size++;
    }

    if (n < 4) {
        /* luminance */
        put_bits(&s->pb, ff_mpeg4_DCtab_lum[size][1], ff_mpeg4_DCtab_lum[size][0]);
    } else {
        /* chrominance */
        put_bits(&s->pb, ff_mpeg4_DCtab_chrom[size][1], ff_mpeg4_DCtab_chrom[size][0]);
    }

    /* encode remaining bits */
    if (size > 0) {
        if (level < 0)
            level = (-level) ^ ((1 << size) - 1);
        put_bits(&s->pb, size, level);
        if (size > 8)
            put_bits(&s->pb, 1, 1);
    }
#endif
}
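
/**
 * Return the number of bits needed to encode the given dc value (the length
 * mpeg4_encode_dc() would write).
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */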
static inline int mpeg4_get_dc_length(int level, int n){
    if (n < 4) {
        return uni_DCtab_lum_len[level + 256];
    } else {
        return uni_DCtab_chrom_len[level + 256];
    }
}
/**
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_block(MpegEncContext * s, int16_t * block, int n, int intra_dc,
                                      uint8_t *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
{
    int i, last_non_zero;
    uint32_t *bits_tab;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];

    if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away
        /* mpeg4 based DC predictor */
        mpeg4_encode_dc(dc_pb, intra_dc, n);
        if(last_index<1) return;
        i = 1;
        bits_tab= uni_mpeg4_intra_rl_bits;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if(last_index<0) return;
        i = 0;
        bits_tab= uni_mpeg4_inter_rl_bits;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[ scan_table[i] ];
        if (level) {
            int run = i - last_non_zero - 1;
            level+=64;
            if((level&(~127)) == 0){
                const int index= UNI_MPEG4_ENC_INDEX(0, run, level);
                put_bits(ac_pb, len_tab[index], bits_tab[index]);
            }else{ //ESC3
                put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(0<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1);
            }
            last_non_zero = i;
        }
    }
    /*if(i<=last_index)*/{
        int level = block[ scan_table[i] ];
        int run = i - last_non_zero - 1;
        level+=64;
        if((level&(~127)) == 0){
            const int index= UNI_MPEG4_ENC_INDEX(1, run, level);
            put_bits(ac_pb, len_tab[index], bits_tab[index]);
        }else{ //ESC3
            put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(1<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1);
        }
    }
}
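
/**
 * Return the number of bits that mpeg4_encode_block() would write for this
 * 8x8 block, without emitting anything to the bitstream.
 */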
static int mpeg4_get_block_length(MpegEncContext * s, int16_t * block, int n, int intra_dc,
                                  uint8_t *scan_table)
{
    int i, last_non_zero;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];
    int len=0;

    if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away
        /* mpeg4 based DC predictor */
        len += mpeg4_get_dc_length(intra_dc, n);
        if(last_index<1) return len;
        i = 1;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if(last_index<0) return 0;
        i = 0;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[ scan_table[i] ];
        if (level) {
            int run = i - last_non_zero - 1;
            level+=64;
            if((level&(~127)) == 0){
                const int index= UNI_MPEG4_ENC_INDEX(0, run, level);
                len += len_tab[index];
            }else{ //ESC3
                len += 7+2+1+6+1+12+1;
            }
            last_non_zero = i;
        }
    }
    /*if(i<=last_index)*/{
        int level = block[ scan_table[i] ];
        int run = i - last_non_zero - 1;
        level+=64;
        if((level&(~127)) == 0){
            const int index= UNI_MPEG4_ENC_INDEX(1, run, level);
            len += len_tab[index];
        }else{ //ESC3
            len += 7+2+1+6+1+12+1;
        }
    }

    return len;
}
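
/**
 * Encode the six 8x8 blocks of a macroblock; with CODEC_FLAG2_NO_OUTPUT set,
 * only their bit cost is accounted for (the bits are skipped, not written).
 */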
static inline void mpeg4_encode_blocks(MpegEncContext * s, int16_t block[6][64], int intra_dc[6],
                                       uint8_t **scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb){
    int i;

    if(scan_table){
        if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){
            for (i = 0; i < 6; i++) {
                skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, intra_dc[i], scan_table[i]));
            }
        }else{
            /* encode each block */
            for (i = 0; i < 6; i++) {
                mpeg4_encode_block(s, block[i], i, intra_dc[i], scan_table[i], dc_pb, ac_pb);
            }
        }
    }else{
        if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){
            for (i = 0; i < 6; i++) {
                skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, 0, s->intra_scantable.permutated));
            }
        }else{
            /* encode each block */
            for (i = 0; i < 6; i++) {
                mpeg4_encode_block(s, block[i], i, 0, s->intra_scantable.permutated, dc_pb, ac_pb);
            }
        }
    }
}
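
/**
 * Return the coded block pattern for a B-frame macroblock. With
 * FF_MPV_FLAG_CBP_RD set, only blocks whose coding improves the
 * rate-distortion score are kept, and the uncoded blocks are cleared.
 */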
static inline int get_b_cbp(MpegEncContext * s, int16_t block[6][64],
                            int motion_x, int motion_y, int mb_type)
{
    int cbp = 0, i;

    if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        int score = 0;
        const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

        for (i = 0; i < 6; i++)
            if (s->coded_score[i] < 0) {
                score += s->coded_score[i];
                cbp   |= 1 << (5 - i);
            }

        if (cbp) {
            int zero_score = -6;
            if ((motion_x | motion_y | s->dquant | mb_type) == 0)
                zero_score -= 4; //2*MV + mb_type + cbp bit

            zero_score *= lambda;
            if (zero_score <= score)
                cbp = 0;
        }

        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
                s->block_last_index[i] = -1;
                s->dsp.clear_block(s->block[i]);
            }
        }
    } else {
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }
    }
    return cbp;
}
//FIXME this is duplicated to h263.c
static const int dquant_code[5]= {1,0,9,2,3};
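
/**
 * Encode a macroblock (coding mode, motion vectors and texture) for
 * I-, P- and B-frames.
 */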
void ff_mpeg4_encode_mb(MpegEncContext * s,
                        int16_t block[6][64],
                        int motion_x, int motion_y)
{
    int cbpc, cbpy, pred_x, pred_y;
    PutBitContext * const pb2    = s->data_partitioning                                        ? &s->pb2    : &s->pb;
    PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
    PutBitContext * const dc_pb  = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_I ? &s->pb2    : &s->pb;
    const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;

    if (!s->mb_intra) {
        int i, cbp;

        if(s->pict_type==AV_PICTURE_TYPE_B){
            static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */
            int mb_type= mb_type_table[s->mv_dir];

            if(s->mb_x==0){
                for(i=0; i<2; i++){
                    s->last_mv[i][0][0]=
                    s->last_mv[i][0][1]=
                    s->last_mv[i][1][0]=
                    s->last_mv[i][1][1]= 0;
                }
            }

            assert(s->dquant>=-2 && s->dquant<=2);
            assert((s->dquant&1)==0);
            assert(mb_type>=0);

            /* nothing to do if this MB was skipped in the next P Frame */
            if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
                s->skip_count++;
                s->mv[0][0][0]=
                s->mv[0][0][1]=
                s->mv[1][0][0]=
                s->mv[1][0][1]= 0;
                s->mv_dir= MV_DIR_FORWARD; //doesn't matter
                s->qscale -= s->dquant;
//                s->mb_skipped=1;

                return;
            }

            cbp= get_b_cbp(s, block, motion_x, motion_y, mb_type);

            if ((cbp | motion_x | motion_y | mb_type) ==0) {
                /* direct MB with MV={0,0} */
                assert(s->dquant==0);

                put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */

                if(interleaved_stats){
                    s->misc_bits++;
                    s->last_bits++;
                }
                s->skip_count++;
                return;
            }

            put_bits(&s->pb, 1, 0);            /* mb coded modb1=0 */
            put_bits(&s->pb, 1, cbp ? 0 : 1);  /* modb2 */ //FIXME merge
            put_bits(&s->pb, mb_type+1, 1);    // this table is so simple that we don't need it :)
            if(cbp) put_bits(&s->pb, 6, cbp);

            if(cbp && mb_type){
                if(s->dquant)
                    put_bits(&s->pb, 2, (s->dquant>>2)+3);
                else
                    put_bits(&s->pb, 1, 0);
            }else
                s->qscale -= s->dquant;

            if(!s->progressive_sequence){
                if(cbp)
                    put_bits(&s->pb, 1, s->interlaced_dct);
                if(mb_type) // not direct mode
                    put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
            }

            if(interleaved_stats){
                s->misc_bits+= get_bits_diff(s);
            }

            if(mb_type == 0){
                assert(s->mv_dir & MV_DIRECT);
                ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
                s->b_count++;
                s->f_count++;
            }else{
                assert(mb_type > 0 && mb_type < 4);
                if(s->mv_type != MV_TYPE_FIELD){
                    if(s->mv_dir & MV_DIR_FORWARD){
                        ff_h263_encode_motion_vector(s, s->mv[0][0][0] - s->last_mv[0][0][0],
                                                        s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
                        s->last_mv[0][0][0]= s->last_mv[0][1][0]= s->mv[0][0][0];
                        s->last_mv[0][0][1]= s->last_mv[0][1][1]= s->mv[0][0][1];
                        s->f_count++;
                    }
                    if(s->mv_dir & MV_DIR_BACKWARD){
                        ff_h263_encode_motion_vector(s, s->mv[1][0][0] - s->last_mv[1][0][0],
                                                        s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
                        s->last_mv[1][0][0]= s->last_mv[1][1][0]= s->mv[1][0][0];
                        s->last_mv[1][0][1]= s->last_mv[1][1][1]= s->mv[1][0][1];
                        s->b_count++;
                    }
                }else{
                    if(s->mv_dir & MV_DIR_FORWARD){
                        put_bits(&s->pb, 1, s->field_select[0][0]);
                        put_bits(&s->pb, 1, s->field_select[0][1]);
                    }
                    if(s->mv_dir & MV_DIR_BACKWARD){
                        put_bits(&s->pb, 1, s->field_select[1][0]);
                        put_bits(&s->pb, 1, s->field_select[1][1]);
                    }
                    if(s->mv_dir & MV_DIR_FORWARD){
                        for(i=0; i<2; i++){
                            ff_h263_encode_motion_vector(s, s->mv[0][i][0] - s->last_mv[0][i][0]  ,
                                                            s->mv[0][i][1] - s->last_mv[0][i][1]/2, s->f_code);
                            s->last_mv[0][i][0]= s->mv[0][i][0];
                            s->last_mv[0][i][1]= s->mv[0][i][1]*2;
                        }
                        s->f_count++;
                    }
                    if(s->mv_dir & MV_DIR_BACKWARD){
                        for(i=0; i<2; i++){
                            ff_h263_encode_motion_vector(s, s->mv[1][i][0] - s->last_mv[1][i][0]  ,
                                                            s->mv[1][i][1] - s->last_mv[1][i][1]/2, s->b_code);
                            s->last_mv[1][i][0]= s->mv[1][i][0];
                            s->last_mv[1][i][1]= s->mv[1][i][1]*2;
                        }
                        s->b_count++;
                    }
                }
            }

            if(interleaved_stats){
                s->mv_bits+= get_bits_diff(s);
            }

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);

            if(interleaved_stats){
                s->p_tex_bits+= get_bits_diff(s);
            }
        }else{ /* s->pict_type==AV_PICTURE_TYPE_B */
            cbp= get_p_cbp(s, block, motion_x, motion_y);

            if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) {
                /* check if the B frames can skip it too, as we must skip it if we skip here
                   why didn't they just compress the skip-mb bits instead of reusing them ?! */
                if(s->max_b_frames>0){
                    int i;
                    int x,y, offset;
                    uint8_t *p_pic;

                    x= s->mb_x*16;
                    y= s->mb_y*16;
                    if(x+16 > s->width)  x= s->width-16;
                    if(y+16 > s->height) y= s->height-16;

                    offset= x + y*s->linesize;
                    p_pic = s->new_picture.f.data[0] + offset;

                    s->mb_skipped=1;
                    for(i=0; i<s->max_b_frames; i++){
                        uint8_t *b_pic;
                        int diff;
                        Picture *pic= s->reordered_input_picture[i+1];

                        if (pic == NULL || pic->f.pict_type != AV_PICTURE_TYPE_B)
                            break;

                        b_pic = pic->f.data[0] + offset;
                        if (!pic->shared)
                            b_pic+= INPLACE_OFFSET;
                        diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
                        if(diff>s->qscale*70){ //FIXME check that 70 is optimal
                            s->mb_skipped=0;
                            break;
                        }
                    }
                }else
                    s->mb_skipped=1;

                if(s->mb_skipped==1){
                    /* skip macroblock */
                    put_bits(&s->pb, 1, 1);

                    if(interleaved_stats){
                        s->misc_bits++;
                        s->last_bits++;
                    }
                    s->skip_count++;

                    return;
                }
            }

            put_bits(&s->pb, 1, 0);     /* mb coded */
            cbpc = cbp & 3;
            cbpy = cbp >> 2;
            cbpy ^= 0xf;
            if(s->mv_type==MV_TYPE_16X16){
                if(s->dquant) cbpc+= 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if(s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant+2]);

                if(!s->progressive_sequence){
                    if(cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                    put_bits(pb2, 1, 0);
                }

                if(interleaved_stats){
                    s->misc_bits+= get_bits_diff(s);
                }

                /* motion vectors: 16x16 mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

                ff_h263_encode_motion_vector(s, motion_x - pred_x,
                                                motion_y - pred_y, s->f_code);
            }else if(s->mv_type==MV_TYPE_FIELD){
                if(s->dquant) cbpc+= 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if(s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant+2]);

                assert(!s->progressive_sequence);
                if(cbp)
                    put_bits(pb2, 1, s->interlaced_dct);
                put_bits(pb2, 1, 1);

                if(interleaved_stats){
                    s->misc_bits+= get_bits_diff(s);
                }

                /* motion vectors: 16x8 interlaced mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                pred_y /=2;

                put_bits(&s->pb, 1, s->field_select[0][0]);
                put_bits(&s->pb, 1, s->field_select[0][1]);

                ff_h263_encode_motion_vector(s, s->mv[0][0][0] - pred_x,
                                                s->mv[0][0][1] - pred_y, s->f_code);
                ff_h263_encode_motion_vector(s, s->mv[0][1][0] - pred_x,
                                                s->mv[0][1][1] - pred_y, s->f_code);
            }else{
                assert(s->mv_type==MV_TYPE_8X8);
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc+16],
                         ff_h263_inter_MCBPC_code[cbpc+16]);
                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);

                if(!s->progressive_sequence){
                    if(cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                }

                if(interleaved_stats){
                    s->misc_bits+= get_bits_diff(s);
                }

                for(i=0; i<4; i++){
                    /* motion vectors: 8x8 mode*/
                    ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

                    ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
                                                    s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
                }
            }

            if(interleaved_stats){
                s->mv_bits+= get_bits_diff(s);
            }

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);

            if(interleaved_stats){
                s->p_tex_bits+= get_bits_diff(s);
            }
            s->f_count++;
        }
    } else {
        int cbp;
        int dc_diff[6]; //dc values with the dc prediction subtracted
        int dir[6];     //prediction direction
        int zigzag_last_index[6];
        uint8_t *scan_table[6];
        int i;

        for(i=0; i<6; i++){
            dc_diff[i]= ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);
        }

        if(s->flags & CODEC_FLAG_AC_PRED){
            s->ac_pred= decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
        }else{
            for(i=0; i<6; i++)
                scan_table[i]= s->intra_scantable.permutated;
        }

        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 1)
                cbp |= 1 << (5 - i);
        }

        cbpc = cbp & 3;
        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if(s->dquant) cbpc+=4;
            put_bits(&s->pb,
                     ff_h263_intra_MCBPC_bits[cbpc],
                     ff_h263_intra_MCBPC_code[cbpc]);
        } else {
            if(s->dquant) cbpc+=8;
            put_bits(&s->pb, 1, 0);     /* mb coded */
            put_bits(&s->pb,
                     ff_h263_inter_MCBPC_bits[cbpc + 4],
                     ff_h263_inter_MCBPC_code[cbpc + 4]);
        }
        put_bits(pb2, 1, s->ac_pred);
        cbpy = cbp >> 2;
        put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
        if(s->dquant)
            put_bits(dc_pb, 2, dquant_code[s->dquant+2]);

        if(!s->progressive_sequence){
            put_bits(dc_pb, 1, s->interlaced_dct);
        }

        if(interleaved_stats){
            s->misc_bits+= get_bits_diff(s);
        }

        mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);

        if(interleaved_stats){
            s->i_tex_bits+= get_bits_diff(s);
        }
        s->i_count++;

        /* restore ac coeffs & last_index stuff if we messed them up with the prediction */
        if(s->ac_pred)
            restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
    }
}
/**
 * add mpeg4 stuffing bits (01...1)
 */
void ff_mpeg4_stuffing(PutBitContext * pbc)
{
    int length;
    put_bits(pbc, 1, 0);
    length= (-put_bits_count(pbc))&7;
    if(length) put_bits(pbc, length, (1<<length)-1);
}
/* must be called before writing the header */
void ff_set_mpeg4_time(MpegEncContext * s){
    if(s->pict_type==AV_PICTURE_TYPE_B){
        ff_mpeg4_init_direct_mv(s);
    }else{
        s->last_time_base= s->time_base;
        s->time_base= s->time/s->avctx->time_base.den;
    }
}
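
/**
 * Write a group of VOP header carrying the time code of the current picture.
 */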
static void mpeg4_encode_gop_header(MpegEncContext * s){
    int hours, minutes, seconds;
    int64_t time;

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, GOP_STARTCODE);

    time = s->current_picture_ptr->f.pts;
    if(s->reordered_input_picture[1])
        time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
    time= time*s->avctx->time_base.num;

    seconds= time/s->avctx->time_base.den;
    minutes= seconds/60; seconds %= 60;
    hours= minutes/60; minutes %= 60;
    hours%=24;

    put_bits(&s->pb, 5, hours);
    put_bits(&s->pb, 6, minutes);
    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 6, seconds);

    put_bits(&s->pb, 1, !!(s->flags&CODEC_FLAG_CLOSED_GOP));
    put_bits(&s->pb, 1, 0); //broken link == NO

    s->last_time_base= time / s->avctx->time_base.den;

    ff_mpeg4_stuffing(&s->pb);
}
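
/**
 * Write the visual object sequence header (with the profile/level indication)
 * and the visual object header.
 */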
static void mpeg4_encode_visual_object_header(MpegEncContext * s){
    int profile_and_level_indication;
    int vo_ver_id;

    if(s->avctx->profile != FF_PROFILE_UNKNOWN){
        profile_and_level_indication = s->avctx->profile << 4;
    }else if(s->max_b_frames || s->quarter_sample){
        profile_and_level_indication= 0xF0; // adv simple
    }else{
        profile_and_level_indication= 0x00; // simple
    }

    if(s->avctx->level != FF_LEVEL_UNKNOWN){
        profile_and_level_indication |= s->avctx->level;
    }else{
        profile_and_level_indication |= 1; //level 1
    }

    if(profile_and_level_indication>>4 == 0xF){
        vo_ver_id= 5;
    }else{
        vo_ver_id= 1;
    }

    //FIXME levels

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VOS_STARTCODE);

    put_bits(&s->pb, 8, profile_and_level_indication);

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE);

    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 4, vo_ver_id);
    put_bits(&s->pb, 3, 1); //priority

    put_bits(&s->pb, 4, 1); //visual obj type== video obj

    put_bits(&s->pb, 1, 0); //video signal type == no clue //FIXME

    ff_mpeg4_stuffing(&s->pb);
}
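
/**
 * Write the video object and video object layer headers.
 */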
static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number)
{
    int vo_ver_id;

    if (!CONFIG_MPEG4_ENCODER) return;

    if(s->max_b_frames || s->quarter_sample){
        vo_ver_id= 5;
        s->vo_type= ADV_SIMPLE_VO_TYPE;
    }else{
        vo_ver_id= 1;
        s->vo_type= SIMPLE_VO_TYPE;
    }

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x100 + vo_number);        /* video obj */
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x120 + vol_number);       /* video obj layer */

    put_bits(&s->pb, 1, 0);             /* random access vol */
    put_bits(&s->pb, 8, s->vo_type);    /* video obj type indication */
    if(s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);         /* is obj layer id= no */
    } else {
        put_bits(&s->pb, 1, 1);         /* is obj layer id= yes */
        put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
        put_bits(&s->pb, 3, 1);         /* is obj layer priority */
    }

    s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

    put_bits(&s->pb, 4, s->aspect_ratio_info);/* aspect ratio info */
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
    }

    if(s->workaround_bugs & FF_BUG_MS) { //
        put_bits(&s->pb, 1, 0);         /* vol control parameters= no @@@ */
    } else {
        put_bits(&s->pb, 1, 1);         /* vol control parameters= yes */
        put_bits(&s->pb, 2, 1);         /* chroma format YUV 420/YV12 */
        put_bits(&s->pb, 1, s->low_delay);
        put_bits(&s->pb, 1, 0);         /* vbv parameters= no */
    }

    put_bits(&s->pb, 2, RECT_SHAPE);    /* vol shape= rectangle */
    put_bits(&s->pb, 1, 1);             /* marker bit */

    put_bits(&s->pb, 16, s->avctx->time_base.den);
    if (s->time_increment_bits < 1)
        s->time_increment_bits = 1;
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 1, 0);             /* fixed vop rate=no */
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 13, s->width);     /* vol width */
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 13, s->height);    /* vol height */
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
    put_bits(&s->pb, 1, 1);             /* obmc disable */
    if (vo_ver_id == 1) {
        put_bits(&s->pb, 1, s->vol_sprite_usage);       /* sprite enable */
    }else{
        put_bits(&s->pb, 2, s->vol_sprite_usage);       /* sprite enable */
    }

    put_bits(&s->pb, 1, 0);             /* not 8 bit == false */
    put_bits(&s->pb, 1, s->mpeg_quant); /* quant type= (0=h263 style)*/

    if(s->mpeg_quant){
        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
    }

    if (vo_ver_id != 1)
        put_bits(&s->pb, 1, s->quarter_sample);
    put_bits(&s->pb, 1, 1);             /* complexity estimation disable */
    s->resync_marker= s->rtp_mode;
    put_bits(&s->pb, 1, s->resync_marker ? 0 : 1);/* resync marker disable */
    put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
    if(s->data_partitioning){
        put_bits(&s->pb, 1, 0);         /* no rvlc */
    }

    if (vo_ver_id != 1){
        put_bits(&s->pb, 1, 0);         /* newpred */
        put_bits(&s->pb, 1, 0);         /* reduced res vop */
    }
    put_bits(&s->pb, 1, 0);             /* scalability */

    ff_mpeg4_stuffing(&s->pb);

    /* user data */
    if(!(s->flags & CODEC_FLAG_BITEXACT)){
        put_bits(&s->pb, 16, 0);
        put_bits(&s->pb, 16, 0x1B2);    /* user_data */
        avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
    }
}
/* write mpeg4 VOP header */
void ff_mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
{
    int time_incr;
    int time_div, time_mod;

    if(s->pict_type==AV_PICTURE_TYPE_I){
        if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){
            if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy
                mpeg4_encode_visual_object_header(s);
            if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number==0) //HACK, the reference sw is buggy
                mpeg4_encode_vol_header(s, 0, 0);
        }
        if(!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_gop_header(s);
    }

    s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B;

    put_bits(&s->pb, 16, 0);                /* vop header */
    put_bits(&s->pb, 16, VOP_STARTCODE);    /* vop header */
    put_bits(&s->pb, 2, s->pict_type - 1);  /* pict type: I = 0 , P = 1 */

    assert(s->time>=0);
    time_div= s->time/s->avctx->time_base.den;
    time_mod= s->time%s->avctx->time_base.den;
    time_incr= time_div - s->last_time_base;
    assert(time_incr >= 0);
    while(time_incr--)
        put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, 1, 0);

    put_bits(&s->pb, 1, 1);                             /* marker */
    put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
    put_bits(&s->pb, 1, 1);                             /* marker */
    put_bits(&s->pb, 1, 1);                             /* vop coded */
    if (    s->pict_type == AV_PICTURE_TYPE_P
        || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) {
        put_bits(&s->pb, 1, s->no_rounding);    /* rounding type */
    }
    put_bits(&s->pb, 3, 0);     /* intra dc VLC threshold */
    if(!s->progressive_sequence){
        put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
        put_bits(&s->pb, 1, s->alternate_scan);
    }
    //FIXME sprite stuff

    put_bits(&s->pb, 5, s->qscale);

    if (s->pict_type != AV_PICTURE_TYPE_I)
        put_bits(&s->pb, 3, s->f_code); /* fcode_for */
    if (s->pict_type == AV_PICTURE_TYPE_B)
        put_bits(&s->pb, 3, s->b_code); /* fcode_back */
}
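
/**
 * Build the unified DC bits/length tables used by mpeg4_encode_dc().
 */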
static av_cold void init_uni_dc_tab(void)
{
    int level, uni_code, uni_len;

    for(level=-256; level<256; level++){
        int size, v, l;
        /* find number of bits */
        size = 0;
        v = abs(level);
        while (v) {
            v >>= 1;
            size++;
        }

        if (level < 0)
            l= (-level) ^ ((1 << size) - 1);
        else
            l= level;

        /* luminance */
        uni_code= ff_mpeg4_DCtab_lum[size][0];
        uni_len = ff_mpeg4_DCtab_lum[size][1];

        if (size > 0) {
            uni_code<<=size; uni_code|=l;
            uni_len+=size;
            if (size > 8){
                uni_code<<=1; uni_code|=1;
                uni_len++;
            }
        }
        uni_DCtab_lum_bits[level+256]= uni_code;
        uni_DCtab_lum_len [level+256]= uni_len;

        /* chrominance */
        uni_code= ff_mpeg4_DCtab_chrom[size][0];
        uni_len = ff_mpeg4_DCtab_chrom[size][1];

        if (size > 0) {
            uni_code<<=size; uni_code|=l;
            uni_len+=size;
            if (size > 8){
                uni_code<<=1; uni_code|=1;
                uni_len++;
            }
        }
        uni_DCtab_chrom_bits[level+256]= uni_code;
        uni_DCtab_chrom_len [level+256]= uni_len;
    }
}
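
/**
 * Build unified run-level bits/length tables, keeping for each
 * (last, run, level) the shortest of the plain VLC and the
 * ESC1/ESC2/ESC3 escape codings.
 */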
static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab,
                                          uint8_t *len_tab)
{
    int slevel, run, last;

    assert(MAX_LEVEL >= 64);
    assert(MAX_RUN >= 63);

    for(slevel=-64; slevel<64; slevel++){
        if(slevel==0) continue;
        for(run=0; run<64; run++){
            for(last=0; last<=1; last++){
                const int index= UNI_MPEG4_ENC_INDEX(last, run, slevel+64);
                int level= slevel < 0 ? -slevel : slevel;
                int sign= slevel < 0 ? 1 : 0;
                int bits, len, code;
                int level1, run1;

                len_tab[index]= 100;

                /* ESC0 */
                code= get_rl_index(rl, last, run, level);
                bits= rl->table_vlc[code][0];
                len=  rl->table_vlc[code][1];
                bits=bits*2+sign; len++;

                if(code!=rl->n && len < len_tab[index]){
                    bits_tab[index]= bits;
                    len_tab [index]= len;
                }
                /* ESC1 */
                bits= rl->table_vlc[rl->n][0];
                len=  rl->table_vlc[rl->n][1];
                bits=bits*2;    len++; //esc1
                level1= level - rl->max_level[last][run];
                if(level1>0){
                    code= get_rl_index(rl, last, run, level1);
                    bits<<= rl->table_vlc[code][1];
                    len  += rl->table_vlc[code][1];
                    bits += rl->table_vlc[code][0];
                    bits=bits*2+sign; len++;

                    if(code!=rl->n && len < len_tab[index]){
                        bits_tab[index]= bits;
                        len_tab [index]= len;
                    }
                }
                /* ESC2 */
                bits= rl->table_vlc[rl->n][0];
                len=  rl->table_vlc[rl->n][1];
                bits=bits*4+2;    len+=2; //esc2
                run1 = run - rl->max_run[last][level] - 1;
                if(run1>=0){
                    code= get_rl_index(rl, last, run1, level);
                    bits<<= rl->table_vlc[code][1];
                    len  += rl->table_vlc[code][1];
                    bits += rl->table_vlc[code][0];
                    bits=bits*2+sign; len++;

                    if(code!=rl->n && len < len_tab[index]){
                        bits_tab[index]= bits;
                        len_tab [index]= len;
                    }
                }
                /* ESC3 */
                bits= rl->table_vlc[rl->n][0];
                len = rl->table_vlc[rl->n][1];
                bits=bits*4+3;    len+=2; //esc3
                bits=bits*2+last; len++;
                bits=bits*64+run; len+=6;
                bits=bits*2+1;    len++;  //marker
                bits=bits*4096+(slevel&0xfff); len+=12;
                bits=bits*2+1;    len++;  //marker

                if(len < len_tab[index]){
                    bits_tab[index]= bits;
                    len_tab [index]= len;
                }
            }
        }
    }
}
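
/**
 * Initialize the MPEG-4 encoder: set up the static VLC tables and, with
 * CODEC_FLAG_GLOBAL_HEADER, write the sequence headers into extradata.
 */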
static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;
    static int done = 0;

    if((ret=ff_MPV_encode_init(avctx)) < 0)
        return ret;

    if (!done) {
        done = 1;

        init_uni_dc_tab();

        ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);

        init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
        init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
    }

    s->min_qcoeff= -2048;
    s->max_qcoeff=  2047;
    s->intra_ac_vlc_length     = uni_mpeg4_intra_rl_len;
    s->intra_ac_vlc_last_length= uni_mpeg4_intra_rl_len + 128*64;
    s->inter_ac_vlc_length     = uni_mpeg4_inter_rl_len;
    s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64;
    s->luma_dc_vlc_length= uni_DCtab_lum_len;
    s->ac_esc_length= 7+2+1+6+1+12+1;
    s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table;
    s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;

    if(s->flags & CODEC_FLAG_GLOBAL_HEADER){
        s->avctx->extradata= av_malloc(1024);
        init_put_bits(&s->pb, s->avctx->extradata, 1024);

        if(!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_visual_object_header(s);
        mpeg4_encode_vol_header(s, 0, 0);

//            ff_mpeg4_stuffing(&s->pb); ?
        flush_put_bits(&s->pb);
        s->avctx->extradata_size= (put_bits_count(&s->pb)+7)>>3;
    }
    return 0;
}
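
/**
 * Split the remaining output buffer into separate bitstream writers for
 * data partitioning (first partition, texture data and second partition).
 */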
void ff_mpeg4_init_partitions(MpegEncContext *s)
{
    uint8_t *start= put_bits_ptr(&s->pb);
    uint8_t *end= s->pb.buf_end;
    int size= end - start;
    int pb_size = (((intptr_t)start + size/3)&(~3)) - (intptr_t)start;
    int tex_size= (size - 2*pb_size)&(~3);

    set_put_bits_buffer_size(&s->pb, pb_size);
    init_put_bits(&s->tex_pb, start + pb_size           , tex_size);
    init_put_bits(&s->pb2   , start + pb_size + tex_size, pb_size);
}
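
/**
 * Write the DC or motion marker and merge the partition bitstreams back
 * into the main bitstream.
 */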
void ff_mpeg4_merge_partitions(MpegEncContext *s)
{
    const int pb2_len   = put_bits_count(&s->pb2   );
    const int tex_pb_len= put_bits_count(&s->tex_pb);
    const int bits= put_bits_count(&s->pb);

    if(s->pict_type==AV_PICTURE_TYPE_I){
        put_bits(&s->pb, 19, DC_MARKER);
        s->misc_bits+=19 + pb2_len + bits - s->last_bits;
        s->i_tex_bits+= tex_pb_len;
    }else{
        put_bits(&s->pb, 17, MOTION_MARKER);
        s->misc_bits+=17 + pb2_len;
        s->mv_bits+= bits - s->last_bits;
        s->p_tex_bits+= tex_pb_len;
    }

    flush_put_bits(&s->pb2);
    flush_put_bits(&s->tex_pb);

    set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
    avpriv_copy_bits(&s->pb, s->pb2.buf   , pb2_len);
    avpriv_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
    s->last_bits= put_bits_count(&s->pb);
}
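
/**
 * Write a video packet (resync) header: resync marker, macroblock number,
 * quantizer and HEC flag.
 */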
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
{
    int mb_num_bits= av_log2(s->mb_num - 1) + 1;

    put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0);
    put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y*s->mb_width);
    put_bits(&s->pb, s->quant_precision, s->qscale);
    put_bits(&s->pb, 1, 0); /* no HEC */
}
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "data_partitioning", "Use data partitioning.",      OFFSET(data_partitioning), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "alternate_scan",    "Enable alternate scantable.", OFFSET(alternate_scan),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};

static const AVClass mpeg4enc_class = {
    .class_name = "MPEG4 encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_mpeg4_encoder = {
    .name           = "mpeg4",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
    .priv_class     = &mpeg4enc_class,
};