/*
 * MPEG4 encoder.
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "mpegvideo.h"
#include "h263.h"
#include "mpeg4video.h"
//The uni_DCtab_* tables below contain unified bits+length tables to encode DC
//differences in mpeg4. Unified in the sense that the specification specifies
//this encoding in several steps.
static uint8_t  uni_DCtab_lum_len[512];
static uint8_t  uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];

//unified encoding tables for run length encoding of coefficients
//unified in the sense that the specification specifies the encoding in several steps.
static uint32_t uni_mpeg4_intra_rl_bits[64*64*2*2];
static uint8_t  uni_mpeg4_intra_rl_len [64*64*2*2];
static uint32_t uni_mpeg4_inter_rl_bits[64*64*2*2];
static uint8_t  uni_mpeg4_inter_rl_len [64*64*2*2];

//#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128 + (run)*256 + (level))
//#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run) + (level)*64)
#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run)*128 + (level))
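/* Layout of the unified RL tables indexed by UNI_MPEG4_ENC_INDEX():
 * "last" selects one of the two 64*128-entry halves, "run" (0..63) selects a
 * 128-entry row, and the level, biased by +64 into the 0..127 range, selects
 * the column; this matches the +128*64 offsets used for the *_last_length
 * pointers in encode_init(). */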
/* mpeg4
   inter
      max level: 24/6
      max run: 53/63
   intra
      max level: 53/16
      max run: 29/41
*/
/**
 * Return the number of bits that encoding the 8x8 block in block would need.
 * @param[in] block_last_index last index in scantable order that refers to a non zero element in block.
 */
static inline int get_block_rate(MpegEncContext * s, DCTELEM block[64], int block_last_index, uint8_t scantable[64]){
    int last=0;
    int j;
    int rate=0;

    for(j=1; j<=block_last_index; j++){
        const int index= scantable[j];
        int level= block[index];
        if(level){
            level+= 64;
            if((level&(~127)) == 0){
                if(j<block_last_index) rate+= s->intra_ac_vlc_length     [UNI_AC_ENC_INDEX(j-last-1, level)];
                else                   rate+= s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j-last-1, level)];
            }else
                rate += s->ac_esc_length;

            last= j;
        }
    }

    return rate;
}
/**
 * Restore the ac coefficients in block that have been changed by decide_ac_pred().
 * This function also restores s->block_last_index.
 * @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline void restore_ac_coeffs(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], const int zigzag_last_index[6])
{
    int i, n;
    memcpy(s->block_last_index, zigzag_last_index, sizeof(int)*6);

    for(n=0; n<6; n++){
        int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

        st[n]= s->intra_scantable.permutated;
        if(dir[n]){
            /* top prediction */
            for(i=1; i<8; i++){
                block[n][s->dsp.idct_permutation[i   ]] = ac_val[i+8];
            }
        }else{
            /* left prediction */
            for(i=1; i<8; i++){
                block[n][s->dsp.idct_permutation[i<<3]]= ac_val[i  ];
            }
        }
    }
}
/**
 * Return the optimal value (0 or 1) for the ac_pred element for the given MB in mpeg4.
 * This function will also update s->block_last_index and s->ac_val.
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], int zigzag_last_index[6])
{
    int score= 0;
    int i, n;
    int8_t * const qscale_table = s->current_picture.f.qscale_table;

    memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);

    for(n=0; n<6; n++){
        int16_t *ac_val, *ac_val1;

        score -= get_block_rate(s, block[n], s->block_last_index[n], s->intra_scantable.permutated);

        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val1= ac_val;
        if(dir[n]){
            const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride;
            /* top prediction */
            ac_val-= s->block_wrap[n]*16;
            if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
                /* same qscale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i   ]];
                    block[n][s->dsp.idct_permutation[i   ]] = level - ac_val[i+8];
                    ac_val1[i  ]= block[n][s->dsp.idct_permutation[i<<3]];
                    ac_val1[i+8]= level;
                }
            }else{
                /* different qscale, we must rescale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i   ]];
                    block[n][s->dsp.idct_permutation[i   ]] = level - ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
                    ac_val1[i  ]= block[n][s->dsp.idct_permutation[i<<3]];
                    ac_val1[i+8]= level;
                }
            }
            st[n]= s->intra_h_scantable.permutated;
        }else{
            const int xy= s->mb_x-1 + s->mb_y*s->mb_stride;
            /* left prediction */
            ac_val-= 16;
            if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
                /* same qscale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i<<3]];
                    block[n][s->dsp.idct_permutation[i<<3]]= level - ac_val[i];
                    ac_val1[i  ]= level;
                    ac_val1[i+8]= block[n][s->dsp.idct_permutation[i   ]];
                }
            }else{
                /* different qscale, we must rescale */
                for(i=1; i<8; i++){
                    const int level= block[n][s->dsp.idct_permutation[i<<3]];
                    block[n][s->dsp.idct_permutation[i<<3]]= level - ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
                    ac_val1[i  ]= level;
                    ac_val1[i+8]= block[n][s->dsp.idct_permutation[i   ]];
                }
            }
            st[n]= s->intra_v_scantable.permutated;
        }

        for(i=63; i>0; i--) //FIXME optimize
            if(block[n][ st[n][i] ]) break;
        s->block_last_index[n]= i;

        score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
    }

    if(score < 0){
        return 1;
    }else{
        restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
        return 0;
    }
}
/**
 * modify mb_type & qscale so that encoding is actually possible in mpeg4
 */
void ff_clean_mpeg4_qscales(MpegEncContext *s){
    int i;
    int8_t * const qscale_table = s->current_picture.f.qscale_table;

    ff_clean_h263_qscales(s);

    if(s->pict_type== AV_PICTURE_TYPE_B){
        int odd=0;
        /* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */

        for(i=0; i<s->mb_num; i++){
            int mb_xy= s->mb_index2xy[i];
            odd += qscale_table[mb_xy]&1;
        }

        if(2*odd > s->mb_num) odd=1;
        else                  odd=0;

        for(i=0; i<s->mb_num; i++){
            int mb_xy= s->mb_index2xy[i];
            if((qscale_table[mb_xy]&1) != odd)
                qscale_table[mb_xy]++;
            if(qscale_table[mb_xy] > 31)
                qscale_table[mb_xy]= 31;
        }

        for(i=1; i<s->mb_num; i++){
            int mb_xy= s->mb_index2xy[i];
            if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_DIRECT)){
                s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_BIDIR;
            }
        }
    }
}
/**
 * Encode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n)
{
#if 1
    /* DC will overflow if level is outside the [-255,255] range. */
    level+=256;
    if (n < 4) {
        /* luminance */
        put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
    } else {
        /* chrominance */
        put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
    }
#else
    int size, v;
    /* find number of bits */
    size = 0;
    v = abs(level);
    while (v) {
        v >>= 1;
        size++;
    }

    if (n < 4) {
        /* luminance */
        put_bits(s, ff_mpeg4_DCtab_lum[size][1], ff_mpeg4_DCtab_lum[size][0]);
    } else {
        /* chrominance */
        put_bits(s, ff_mpeg4_DCtab_chrom[size][1], ff_mpeg4_DCtab_chrom[size][0]);
    }

    /* encode remaining bits */
    if (size > 0) {
        if (level < 0)
            level = (-level) ^ ((1 << size) - 1);
        put_bits(s, size, level);
        if (size > 8)
            put_bits(s, 1, 1);
    }
#endif
}

static inline int mpeg4_get_dc_length(int level, int n){
    if (n < 4) {
        return uni_DCtab_lum_len[level + 256];
    } else {
        return uni_DCtab_chrom_len[level + 256];
    }
}
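/* Both mpeg4_encode_block() and mpeg4_get_block_length() below fall back to
 * the 30-bit ESC3 escape when a level does not fit the unified tables:
 * 7-bit escape VLC, 2 bits "11" (third escape mode), 1 bit last, 6 bits run,
 * marker bit, 12 bits signed level, marker bit -- the same length that
 * encode_init() stores in s->ac_esc_length. */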
/**
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
                                      uint8_t *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
{
    int i, last_non_zero;
    uint32_t *bits_tab;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];

    if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away
        /* mpeg4 based DC predictor */
        mpeg4_encode_dc(dc_pb, intra_dc, n);
        if(last_index<1) return;
        i = 1;
        bits_tab= uni_mpeg4_intra_rl_bits;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if(last_index<0) return;
        i = 0;
        bits_tab= uni_mpeg4_inter_rl_bits;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[ scan_table[i] ];
        if (level) {
            int run = i - last_non_zero - 1;
            level+=64;
            if((level&(~127)) == 0){
                const int index= UNI_MPEG4_ENC_INDEX(0, run, level);
                put_bits(ac_pb, len_tab[index], bits_tab[index]);
            }else{ //ESC3
                put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(0<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1);
            }
            last_non_zero = i;
        }
    }
    /*if(i<=last_index)*/{
        int level = block[ scan_table[i] ];
        int run = i - last_non_zero - 1;
        level+=64;
        if((level&(~127)) == 0){
            const int index= UNI_MPEG4_ENC_INDEX(1, run, level);
            put_bits(ac_pb, len_tab[index], bits_tab[index]);
        }else{ //ESC3
            put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(1<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1);
        }
    }
}
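/* Bit-counting twin of mpeg4_encode_block(): walks the block the same way but
 * only sums code lengths, so CODEC_FLAG2_NO_OUTPUT runs can advance the
 * bitstream position via skip_put_bits() without writing anything. */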
static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
                                  uint8_t *scan_table)
{
    int i, last_non_zero;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];
    int len=0;

    if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away
        /* mpeg4 based DC predictor */
        len += mpeg4_get_dc_length(intra_dc, n);
        if(last_index<1) return len;
        i = 1;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if(last_index<0) return 0;
        i = 0;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[ scan_table[i] ];
        if (level) {
            int run = i - last_non_zero - 1;
            level+=64;
            if((level&(~127)) == 0){
                const int index= UNI_MPEG4_ENC_INDEX(0, run, level);
                len += len_tab[index];
            }else{ //ESC3
                len += 7+2+1+6+1+12+1;
            }
            last_non_zero = i;
        }
    }
    /*if(i<=last_index)*/{
        int level = block[ scan_table[i] ];
        int run = i - last_non_zero - 1;
        level+=64;
        if((level&(~127)) == 0){
            const int index= UNI_MPEG4_ENC_INDEX(1, run, level);
            len += len_tab[index];
        }else{ //ESC3
            len += 7+2+1+6+1+12+1;
        }
    }

    return len;
}
static inline void mpeg4_encode_blocks(MpegEncContext * s, DCTELEM block[6][64], int intra_dc[6],
                                       uint8_t **scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb){
    int i;

    if(scan_table){
        if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){
            for (i = 0; i < 6; i++) {
                skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, intra_dc[i], scan_table[i]));
            }
        }else{
            /* encode each block */
            for (i = 0; i < 6; i++) {
                mpeg4_encode_block(s, block[i], i, intra_dc[i], scan_table[i], dc_pb, ac_pb);
            }
        }
    }else{
        if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){
            for (i = 0; i < 6; i++) {
                skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, 0, s->intra_scantable.permutated));
            }
        }else{
            /* encode each block */
            for (i = 0; i < 6; i++) {
                mpeg4_encode_block(s, block[i], i, 0, s->intra_scantable.permutated, dc_pb, ac_pb);
            }
        }
    }
}
//FIXME this is duplicated to h263.c
static const int dquant_code[5]= {1,0,9,2,3};
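/* dquant_code[] is indexed with s->dquant+2; a 2-bit code is only written when
 * dquant is nonzero, so the middle entry (9) is never emitted. */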
void mpeg4_encode_mb(MpegEncContext * s,
                     DCTELEM block[6][64],
                     int motion_x, int motion_y)
{
    int cbpc, cbpy, pred_x, pred_y;
    PutBitContext * const pb2    = s->data_partitioning                                   ? &s->pb2    : &s->pb;
    PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
    PutBitContext * const dc_pb  = s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_I ? &s->pb2    : &s->pb;
    const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;

    if (!s->mb_intra) {
        int i, cbp;

        if(s->pict_type==AV_PICTURE_TYPE_B){
            static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */
            int mb_type= mb_type_table[s->mv_dir];

            if(s->mb_x==0){
                for(i=0; i<2; i++){
                    s->last_mv[i][0][0]=
                    s->last_mv[i][0][1]=
                    s->last_mv[i][1][0]=
                    s->last_mv[i][1][1]= 0;
                }
            }

            assert(s->dquant>=-2 && s->dquant<=2);
            assert((s->dquant&1)==0);
            assert(mb_type>=0);

            /* nothing to do if this MB was skipped in the next P Frame */
            if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
                s->skip_count++;
                s->mv[0][0][0]=
                s->mv[0][0][1]=
                s->mv[1][0][0]=
                s->mv[1][0][1]= 0;
                s->mv_dir= MV_DIR_FORWARD; //doesn't matter
                s->qscale -= s->dquant;
//                s->mb_skipped=1;

                return;
            }

            cbp= get_b_cbp(s, block, motion_x, motion_y, mb_type);

            if ((cbp | motion_x | motion_y | mb_type) ==0) {
                /* direct MB with MV={0,0} */
                assert(s->dquant==0);

                put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */

                if(interleaved_stats){
                    s->misc_bits++;
                    s->last_bits++;
                }
                s->skip_count++;
                return;
            }

            put_bits(&s->pb, 1, 0);           /* mb coded modb1=0 */
            put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ //FIXME merge
            put_bits(&s->pb, mb_type+1, 1);   // this table is so simple that we don't need it :)
            if(cbp) put_bits(&s->pb, 6, cbp);

            if(cbp && mb_type){
                if(s->dquant)
                    put_bits(&s->pb, 2, (s->dquant>>2)+3);
                else
                    put_bits(&s->pb, 1, 0);
            }else
                s->qscale -= s->dquant;

            if(!s->progressive_sequence){
                if(cbp)
                    put_bits(&s->pb, 1, s->interlaced_dct);
                if(mb_type) // not direct mode
                    put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
            }

            if(interleaved_stats){
                s->misc_bits+= get_bits_diff(s);
            }

            if(mb_type == 0){
                assert(s->mv_dir & MV_DIRECT);
                ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
                s->b_count++;
                s->f_count++;
            }else{
                assert(mb_type > 0 && mb_type < 4);
                if(s->mv_type != MV_TYPE_FIELD){
                    if(s->mv_dir & MV_DIR_FORWARD){
                        ff_h263_encode_motion_vector(s, s->mv[0][0][0] - s->last_mv[0][0][0],
                                                        s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
                        s->last_mv[0][0][0]= s->last_mv[0][1][0]= s->mv[0][0][0];
                        s->last_mv[0][0][1]= s->last_mv[0][1][1]= s->mv[0][0][1];
                        s->f_count++;
                    }
                    if(s->mv_dir & MV_DIR_BACKWARD){
                        ff_h263_encode_motion_vector(s, s->mv[1][0][0] - s->last_mv[1][0][0],
                                                        s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
                        s->last_mv[1][0][0]= s->last_mv[1][1][0]= s->mv[1][0][0];
                        s->last_mv[1][0][1]= s->last_mv[1][1][1]= s->mv[1][0][1];
                        s->b_count++;
                    }
                }else{
                    if(s->mv_dir & MV_DIR_FORWARD){
                        put_bits(&s->pb, 1, s->field_select[0][0]);
                        put_bits(&s->pb, 1, s->field_select[0][1]);
                    }
                    if(s->mv_dir & MV_DIR_BACKWARD){
                        put_bits(&s->pb, 1, s->field_select[1][0]);
                        put_bits(&s->pb, 1, s->field_select[1][1]);
                    }
                    if(s->mv_dir & MV_DIR_FORWARD){
                        for(i=0; i<2; i++){
                            ff_h263_encode_motion_vector(s, s->mv[0][i][0] - s->last_mv[0][i][0]  ,
                                                            s->mv[0][i][1] - s->last_mv[0][i][1]/2, s->f_code);
                            s->last_mv[0][i][0]= s->mv[0][i][0];
                            s->last_mv[0][i][1]= s->mv[0][i][1]*2;
                        }
                        s->f_count++;
                    }
                    if(s->mv_dir & MV_DIR_BACKWARD){
                        for(i=0; i<2; i++){
                            ff_h263_encode_motion_vector(s, s->mv[1][i][0] - s->last_mv[1][i][0]  ,
                                                            s->mv[1][i][1] - s->last_mv[1][i][1]/2, s->b_code);
                            s->last_mv[1][i][0]= s->mv[1][i][0];
                            s->last_mv[1][i][1]= s->mv[1][i][1]*2;
                        }
                        s->b_count++;
                    }
                }
            }

            if(interleaved_stats){
                s->mv_bits+= get_bits_diff(s);
            }

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);

            if(interleaved_stats){
                s->p_tex_bits+= get_bits_diff(s);
            }
        }else{ /* s->pict_type!=AV_PICTURE_TYPE_B */
            cbp= get_p_cbp(s, block, motion_x, motion_y);

            if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) {
                /* check if the B frames can skip it too, as we must skip it if we skip here
                   why didn't they just compress the skip-mb bits instead of reusing them ?! */
                if(s->max_b_frames>0){
                    int i;
                    int x,y, offset;
                    uint8_t *p_pic;

                    x= s->mb_x*16;
                    y= s->mb_y*16;
                    offset= x + y*s->linesize;
                    p_pic = s->new_picture.f.data[0] + offset;

                    s->mb_skipped=1;
                    for(i=0; i<s->max_b_frames; i++){
                        uint8_t *b_pic;
                        int diff;
                        Picture *pic= s->reordered_input_picture[i+1];

                        if (pic == NULL || pic->f.pict_type != AV_PICTURE_TYPE_B)
                            break;

                        b_pic = pic->f.data[0] + offset;
                        if (pic->f.type != FF_BUFFER_TYPE_SHARED)
                            b_pic+= INPLACE_OFFSET;

                        if(x+16 > s->width || y+16 > s->height){
                            int x1,y1;
                            int xe= FFMIN(16, s->width - x);
                            int ye= FFMIN(16, s->height- y);
                            diff=0;
                            for(y1=0; y1<ye; y1++){
                                for(x1=0; x1<xe; x1++){
                                    diff+= FFABS(p_pic[x1+y1*s->linesize] - b_pic[x1+y1*s->linesize]);
                                }
                            }
                            diff= diff*256/(xe*ye);
                        }else{
                            diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
                        }
                        if(diff>s->qscale*70){ //FIXME check that 70 is optimal
                            s->mb_skipped=0;
                            break;
                        }
                    }
                }else
                    s->mb_skipped=1;

                if(s->mb_skipped==1){
                    /* skip macroblock */
                    put_bits(&s->pb, 1, 1);

                    if(interleaved_stats){
                        s->misc_bits++;
                        s->last_bits++;
                    }
                    s->skip_count++;

                    return;
                }
            }

            put_bits(&s->pb, 1, 0);     /* mb coded */
            cbpc = cbp & 3;
            cbpy = cbp >> 2;
            cbpy ^= 0xf;
            if(s->mv_type==MV_TYPE_16X16){
                if(s->dquant) cbpc+= 8;
                put_bits(&s->pb,
                        ff_h263_inter_MCBPC_bits[cbpc],
                        ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if(s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant+2]);

                if(!s->progressive_sequence){
                    if(cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                    put_bits(pb2, 1, 0);
                }

                if(interleaved_stats){
                    s->misc_bits+= get_bits_diff(s);
                }

                /* motion vectors: 16x16 mode */
                h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

                ff_h263_encode_motion_vector(s, motion_x - pred_x,
                                                motion_y - pred_y, s->f_code);
            }else if(s->mv_type==MV_TYPE_FIELD){
                if(s->dquant) cbpc+= 8;
                put_bits(&s->pb,
                        ff_h263_inter_MCBPC_bits[cbpc],
                        ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
                if(s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant+2]);

                assert(!s->progressive_sequence);
                if(cbp)
                    put_bits(pb2, 1, s->interlaced_dct);
                put_bits(pb2, 1, 1);

                if(interleaved_stats){
                    s->misc_bits+= get_bits_diff(s);
                }

                /* motion vectors: 16x8 interlaced mode */
                h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                pred_y /=2;

                put_bits(&s->pb, 1, s->field_select[0][0]);
                put_bits(&s->pb, 1, s->field_select[0][1]);

                ff_h263_encode_motion_vector(s, s->mv[0][0][0] - pred_x,
                                                s->mv[0][0][1] - pred_y, s->f_code);
                ff_h263_encode_motion_vector(s, s->mv[0][1][0] - pred_x,
                                                s->mv[0][1][1] - pred_y, s->f_code);
            }else{
                assert(s->mv_type==MV_TYPE_8X8);
                put_bits(&s->pb,
                        ff_h263_inter_MCBPC_bits[cbpc+16],
                        ff_h263_inter_MCBPC_code[cbpc+16]);
                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);

                if(!s->progressive_sequence){
                    if(cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                }

                if(interleaved_stats){
                    s->misc_bits+= get_bits_diff(s);
                }

                for(i=0; i<4; i++){
                    /* motion vectors: 8x8 mode*/
                    h263_pred_motion(s, i, 0, &pred_x, &pred_y);

                    ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
                                                    s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
                }
            }

            if(interleaved_stats){
                s->mv_bits+= get_bits_diff(s);
            }

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);

            if(interleaved_stats){
                s->p_tex_bits+= get_bits_diff(s);
            }
            s->f_count++;
        }
    } else {
        int cbp;
        int dc_diff[6];   //dc values with the dc prediction subtracted
        int dir[6];       //prediction direction
        int zigzag_last_index[6];
        uint8_t *scan_table[6];
        int i;

        for(i=0; i<6; i++){
            dc_diff[i]= ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);
        }

        if(s->flags & CODEC_FLAG_AC_PRED){
            s->ac_pred= decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
        }else{
            for(i=0; i<6; i++)
                scan_table[i]= s->intra_scantable.permutated;
        }

        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 1)
                cbp |= 1 << (5 - i);
        }

        cbpc = cbp & 3;
        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if(s->dquant) cbpc+=4;
            put_bits(&s->pb,
                ff_h263_intra_MCBPC_bits[cbpc],
                ff_h263_intra_MCBPC_code[cbpc]);
        } else {
            if(s->dquant) cbpc+=8;
            put_bits(&s->pb, 1, 0);     /* mb coded */
            put_bits(&s->pb,
                ff_h263_inter_MCBPC_bits[cbpc + 4],
                ff_h263_inter_MCBPC_code[cbpc + 4]);
        }
        put_bits(pb2, 1, s->ac_pred);
        cbpy = cbp >> 2;
        put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
        if(s->dquant)
            put_bits(dc_pb, 2, dquant_code[s->dquant+2]);

        if(!s->progressive_sequence){
            put_bits(dc_pb, 1, s->interlaced_dct);
        }

        if(interleaved_stats){
            s->misc_bits+= get_bits_diff(s);
        }

        mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);

        if(interleaved_stats){
            s->i_tex_bits+= get_bits_diff(s);
        }
        s->i_count++;

        /* restore ac coeffs & last_index stuff if we messed them up with the prediction */
        if(s->ac_pred)
            restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
    }
}
/**
 * add mpeg4 stuffing bits (01...1)
 */
void ff_mpeg4_stuffing(PutBitContext * pbc)
{
    int length;
    put_bits(pbc, 1, 0);
    length= (-put_bits_count(pbc))&7;
    if(length) put_bits(pbc, length, (1<<length)-1);
}

/* must be called before writing the header */
void ff_set_mpeg4_time(MpegEncContext * s){
    if(s->pict_type==AV_PICTURE_TYPE_B){
        ff_mpeg4_init_direct_mv(s);
    }else{
        s->last_time_base= s->time_base;
        s->time_base= FFUDIV(s->time, s->avctx->time_base.den);
    }
}
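/* The GOP header carries a wall-clock style time_code: the earliest pts of the
 * current picture and the next reordered input picture, converted to
 * time_base.den units, is split into hours/minutes/seconds below. */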
static void mpeg4_encode_gop_header(MpegEncContext * s){
    int hours, minutes, seconds;
    int64_t time;

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, GOP_STARTCODE);

    time = s->current_picture_ptr->f.pts;
    if(s->reordered_input_picture[1])
        time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
    time= time*s->avctx->time_base.num;
    s->last_time_base= FFUDIV(time, s->avctx->time_base.den);

    seconds= FFUDIV(time, s->avctx->time_base.den);
    minutes= FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60);
    hours  = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 60);
    hours  = FFUMOD(hours  , 24);

    put_bits(&s->pb, 5, hours);
    put_bits(&s->pb, 6, minutes);
    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 6, seconds);

    put_bits(&s->pb, 1, !!(s->flags&CODEC_FLAG_CLOSED_GOP));
    put_bits(&s->pb, 1, 0); //broken link == NO

    ff_mpeg4_stuffing(&s->pb);
}
static void mpeg4_encode_visual_object_header(MpegEncContext * s){
    int profile_and_level_indication;
    int vo_ver_id;

    if(s->avctx->profile != FF_PROFILE_UNKNOWN){
        profile_and_level_indication = s->avctx->profile << 4;
    }else if(s->max_b_frames || s->quarter_sample){
        profile_and_level_indication= 0xF0; // adv simple
    }else{
        profile_and_level_indication= 0x00; // simple
    }

    if(s->avctx->level != FF_LEVEL_UNKNOWN){
        profile_and_level_indication |= s->avctx->level;
    }else{
        profile_and_level_indication |= 1; //level 1
    }

    if(profile_and_level_indication>>4 == 0xF){
        vo_ver_id= 5;
    }else{
        vo_ver_id= 1;
    }

    //FIXME levels

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VOS_STARTCODE);

    put_bits(&s->pb, 8, profile_and_level_indication);

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE);

    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 4, vo_ver_id);
    put_bits(&s->pb, 3, 1);  //priority

    put_bits(&s->pb, 4, 1);  //visual obj type== video obj
    put_bits(&s->pb, 1, 0);  //video signal type == no clue //FIXME

    ff_mpeg4_stuffing(&s->pb);
}
static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number)
{
    int vo_ver_id;

    if (!CONFIG_MPEG4_ENCODER) return;

    if(s->max_b_frames || s->quarter_sample){
        vo_ver_id= 5;
        s->vo_type= ADV_SIMPLE_VO_TYPE;
    }else{
        vo_ver_id= 1;
        s->vo_type= SIMPLE_VO_TYPE;
    }

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x100 + vo_number);        /* video obj */
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, 0x120 + vol_number);       /* video obj layer */

    put_bits(&s->pb, 1, 0);             /* random access vol */
    put_bits(&s->pb, 8, s->vo_type);    /* video obj type indication */
    if(s->workaround_bugs & FF_BUG_MS) {
        put_bits(&s->pb, 1, 0);         /* is obj layer id= no */
    } else {
        put_bits(&s->pb, 1, 1);         /* is obj layer id= yes */
        put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
        put_bits(&s->pb, 3, 1);         /* is obj layer priority */
    }
    s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

    put_bits(&s->pb, 4, s->aspect_ratio_info);/* aspect ratio info */
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
        put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
    }

    if(s->workaround_bugs & FF_BUG_MS) { //
        put_bits(&s->pb, 1, 0);         /* vol control parameters= no @@@ */
    } else {
        put_bits(&s->pb, 1, 1);         /* vol control parameters= yes */
        put_bits(&s->pb, 2, 1);         /* chroma format YUV 420/YV12 */
        put_bits(&s->pb, 1, s->low_delay);
        put_bits(&s->pb, 1, 0);         /* vbv parameters= no */
    }

    put_bits(&s->pb, 2, RECT_SHAPE);    /* vol shape= rectangle */
    put_bits(&s->pb, 1, 1);             /* marker bit */

    put_bits(&s->pb, 16, s->avctx->time_base.den);
    if (s->time_increment_bits < 1)
        s->time_increment_bits = 1;
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 1, 0);             /* fixed vop rate=no */
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 13, s->width);     /* vol width */
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 13, s->height);    /* vol height */
    put_bits(&s->pb, 1, 1);             /* marker bit */
    put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
    put_bits(&s->pb, 1, 1);             /* obmc disable */
    if (vo_ver_id == 1) {
        put_bits(&s->pb, 1, s->vol_sprite_usage);       /* sprite enable */
    }else{
        put_bits(&s->pb, 2, s->vol_sprite_usage);       /* sprite enable */
    }

    put_bits(&s->pb, 1, 0);             /* not 8 bit == false */
    put_bits(&s->pb, 1, s->mpeg_quant); /* quant type= (0=h263 style)*/

    if(s->mpeg_quant){
        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
    }

    if (vo_ver_id != 1)
        put_bits(&s->pb, 1, s->quarter_sample);
    put_bits(&s->pb, 1, 1);             /* complexity estimation disable */
    s->resync_marker= s->rtp_mode;
    put_bits(&s->pb, 1, s->resync_marker ? 0 : 1);/* resync marker disable */
    put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
    if(s->data_partitioning){
        put_bits(&s->pb, 1, 0);         /* no rvlc */
    }

    if (vo_ver_id != 1){
        put_bits(&s->pb, 1, 0);         /* newpred */
        put_bits(&s->pb, 1, 0);         /* reduced res vop */
    }
    put_bits(&s->pb, 1, 0);             /* scalability */

    ff_mpeg4_stuffing(&s->pb);

    /* user data */
    if(!(s->flags & CODEC_FLAG_BITEXACT)){
        put_bits(&s->pb, 16, 0);
        put_bits(&s->pb, 16, 0x1B2);    /* user_data */
        ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
    }
}
/* write mpeg4 VOP header */
void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
{
    int time_incr;
    int time_div, time_mod;

    if(s->pict_type==AV_PICTURE_TYPE_I){
        if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){
            if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy
                mpeg4_encode_visual_object_header(s);
            if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number==0) //HACK, the reference sw is buggy
                mpeg4_encode_vol_header(s, 0, 0);
        }
        if(!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_gop_header(s);
    }

    s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B;

    put_bits(&s->pb, 16, 0);                /* vop header */
    put_bits(&s->pb, 16, VOP_STARTCODE);    /* vop header */
    put_bits(&s->pb, 2, s->pict_type - 1);  /* pict type: I = 0 , P = 1 */

    time_div= FFUDIV(s->time, s->avctx->time_base.den);
    time_mod= FFUMOD(s->time, s->avctx->time_base.den);
    time_incr= time_div - s->last_time_base;
    assert(time_incr >= 0);
    while(time_incr--)
        put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, 1, 0);

    put_bits(&s->pb, 1, 1);                             /* marker */
    put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
    put_bits(&s->pb, 1, 1);                             /* marker */
    put_bits(&s->pb, 1, 1);                             /* vop coded */
    if (    s->pict_type == AV_PICTURE_TYPE_P
        || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) {
        put_bits(&s->pb, 1, s->no_rounding);    /* rounding type */
    }
    put_bits(&s->pb, 3, 0);     /* intra dc VLC threshold */
    if(!s->progressive_sequence){
        put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
        put_bits(&s->pb, 1, s->alternate_scan);
    }
    //FIXME sprite stuff

    put_bits(&s->pb, 5, s->qscale);

    if (s->pict_type != AV_PICTURE_TYPE_I)
        put_bits(&s->pb, 3, s->f_code); /* fcode_for */
    if (s->pict_type == AV_PICTURE_TYPE_B)
        put_bits(&s->pb, 3, s->b_code); /* fcode_back */
}
static void init_uni_dc_tab(void)
{
    int level, uni_code, uni_len;

    for(level=-256; level<256; level++){
        int size, v, l;
        /* find number of bits */
        size = 0;
        v = abs(level);
        while (v) {
            v >>= 1;
            size++;
        }

        if (level < 0)
            l= (-level) ^ ((1 << size) - 1);
        else
            l= level;

        /* luminance */
        uni_code= ff_mpeg4_DCtab_lum[size][0];
        uni_len = ff_mpeg4_DCtab_lum[size][1];

        if (size > 0) {
            uni_code<<=size; uni_code|=l;
            uni_len+=size;
            if (size > 8){
                uni_code<<=1; uni_code|=1;
                uni_len++;
            }
        }
        uni_DCtab_lum_bits[level+256]= uni_code;
        uni_DCtab_lum_len [level+256]= uni_len;

        /* chrominance */
        uni_code= ff_mpeg4_DCtab_chrom[size][0];
        uni_len = ff_mpeg4_DCtab_chrom[size][1];

        if (size > 0) {
            uni_code<<=size; uni_code|=l;
            uni_len+=size;
            if (size > 8){
                uni_code<<=1; uni_code|=1;
                uni_len++;
            }
        }
        uni_DCtab_chrom_bits[level+256]= uni_code;
        uni_DCtab_chrom_len [level+256]= uni_len;
    }
}
static void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab){
    int slevel, run, last;

    assert(MAX_LEVEL >= 64);
    assert(MAX_RUN >= 63);

    for(slevel=-64; slevel<64; slevel++){
        if(slevel==0) continue;
        for(run=0; run<64; run++){
            for(last=0; last<=1; last++){
                const int index= UNI_MPEG4_ENC_INDEX(last, run, slevel+64);
                int level= slevel < 0 ? -slevel : slevel;
                int sign= slevel < 0 ? 1 : 0;
                int bits, len, code;
                int level1, run1;

                len_tab[index]= 100;

                /* ESC0 */
                code= get_rl_index(rl, last, run, level);
                bits= rl->table_vlc[code][0];
                len=  rl->table_vlc[code][1];
                bits=bits*2+sign; len++;

                if(code!=rl->n && len < len_tab[index]){
                    bits_tab[index]= bits;
                    len_tab [index]= len;
                }
                /* ESC1 */
                bits= rl->table_vlc[rl->n][0];
                len=  rl->table_vlc[rl->n][1];
                bits=bits*2;    len++; //esc1
                level1= level - rl->max_level[last][run];
                if(level1>0){
                    code= get_rl_index(rl, last, run, level1);
                    bits<<= rl->table_vlc[code][1];
                    len  += rl->table_vlc[code][1];
                    bits += rl->table_vlc[code][0];
                    bits=bits*2+sign; len++;

                    if(code!=rl->n && len < len_tab[index]){
                        bits_tab[index]= bits;
                        len_tab [index]= len;
                    }
                }
                /* ESC2 */
                bits= rl->table_vlc[rl->n][0];
                len=  rl->table_vlc[rl->n][1];
                bits=bits*4+2;    len+=2; //esc2
                run1 = run - rl->max_run[last][level] - 1;
                if(run1>=0){
                    code= get_rl_index(rl, last, run1, level);
                    bits<<= rl->table_vlc[code][1];
                    len  += rl->table_vlc[code][1];
                    bits += rl->table_vlc[code][0];
                    bits=bits*2+sign; len++;

                    if(code!=rl->n && len < len_tab[index]){
                        bits_tab[index]= bits;
                        len_tab [index]= len;
                    }
                }
                /* ESC3 */
                bits= rl->table_vlc[rl->n][0];
                len = rl->table_vlc[rl->n][1];
                bits=bits*4+3;    len+=2; //esc3
                bits=bits*2+last; len++;
                bits=bits*64+run; len+=6;
                bits=bits*2+1;    len++;  //marker
                bits=bits*4096+(slevel&0xfff); len+=12;
                bits=bits*2+1;    len++;  //marker

                if(len < len_tab[index]){
                    bits_tab[index]= bits;
                    len_tab [index]= len;
                }
            }
        }
    }
}
static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int ret;
    static int done = 0;

    if((ret=MPV_encode_init(avctx)) < 0)
        return ret;

    if (!done) {
        done = 1;

        init_uni_dc_tab();

        init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);

        init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
        init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
    }

    s->min_qcoeff= -2048;
    s->max_qcoeff=  2047;
    s->intra_ac_vlc_length     = uni_mpeg4_intra_rl_len;
    s->intra_ac_vlc_last_length= uni_mpeg4_intra_rl_len + 128*64;
    s->inter_ac_vlc_length     = uni_mpeg4_inter_rl_len;
    s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64;
    s->luma_dc_vlc_length= uni_DCtab_lum_len;
    s->chroma_dc_vlc_length= uni_DCtab_chrom_len;
    s->ac_esc_length= 7+2+1+6+1+12+1;
    s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table;
    s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;

    if(s->flags & CODEC_FLAG_GLOBAL_HEADER){
        s->avctx->extradata= av_malloc(1024);
        init_put_bits(&s->pb, s->avctx->extradata, 1024);

        if(!(s->workaround_bugs & FF_BUG_MS))
            mpeg4_encode_visual_object_header(s);
        mpeg4_encode_vol_header(s, 0, 0);

//        ff_mpeg4_stuffing(&s->pb); ?
        flush_put_bits(&s->pb);
        s->avctx->extradata_size= (put_bits_count(&s->pb)+7)>>3;
    }
    return 0;
}
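/* For data partitioning the remaining output buffer is split roughly into
 * thirds: &s->pb keeps the first partition, s->tex_pb collects the texture
 * (DCT coefficient) data and s->pb2 the data written between the partition
 * marker and the texture; ff_mpeg4_merge_partitions() concatenates them again. */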
void ff_mpeg4_init_partitions(MpegEncContext *s)
{
    uint8_t *start= put_bits_ptr(&s->pb);
    uint8_t *end= s->pb.buf_end;
    int size= end - start;
    int pb_size = (((intptr_t)start + size/3)&(~3)) - (intptr_t)start;
    int tex_size= (size - 2*pb_size)&(~3);

    set_put_bits_buffer_size(&s->pb, pb_size);
    init_put_bits(&s->tex_pb, start + pb_size           , tex_size);
    init_put_bits(&s->pb2   , start + pb_size + tex_size, pb_size);
}

void ff_mpeg4_merge_partitions(MpegEncContext *s)
{
    const int pb2_len   = put_bits_count(&s->pb2   );
    const int tex_pb_len= put_bits_count(&s->tex_pb);
    const int bits= put_bits_count(&s->pb);

    if(s->pict_type==AV_PICTURE_TYPE_I){
        put_bits(&s->pb, 19, DC_MARKER);
        s->misc_bits+=19 + pb2_len + bits - s->last_bits;
        s->i_tex_bits+= tex_pb_len;
    }else{
        put_bits(&s->pb, 17, MOTION_MARKER);
        s->misc_bits+=17 + pb2_len;
        s->mv_bits+= bits - s->last_bits;
        s->p_tex_bits+= tex_pb_len;
    }

    flush_put_bits(&s->pb2);
    flush_put_bits(&s->tex_pb);

    set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
    ff_copy_bits(&s->pb, s->pb2.buf   , pb2_len);
    ff_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
    s->last_bits= put_bits_count(&s->pb);
}
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
{
    int mb_num_bits= av_log2(s->mb_num - 1) + 1;

    put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0);
    put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y*s->mb_width);
    put_bits(&s->pb, s->quant_precision, s->qscale);
    put_bits(&s->pb, 1, 0); /* no HEC */
}
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "data_partitioning", "Use data partitioning.",      OFFSET(data_partitioning), FF_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { "alternate_scan",    "Enable alternate scantable.", OFFSET(alternate_scan),    FF_OPT_TYPE_INT, { 0 }, 0, 1, VE },
    { NULL },
};

static const AVClass mpeg4enc_class = {
    .class_name = "MPEG4 encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_mpeg4_encoder = {
    .name           = "mpeg4",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_MPEG4,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = encode_init,
    .encode         = MPV_encode_picture,
    .close          = MPV_encode_end,
    .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
    .priv_class     = &mpeg4enc_class,
};