/*
 * MPEG4 encoder.
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "mpegvideo.h"
#include "h263.h"
#include "mpeg4video.h"
//The uni_DCtab_* tables below contain unified bits+length tables to encode DC
//differences in mpeg4. Unified in the sense that the specification specifies
//this encoding in several steps.
static uint8_t  uni_DCtab_lum_len[512];
static uint8_t  uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];
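
/* A reading aid (derived from init_uni_dc_tab() further below, not from any
 * additional spec text): for a DC difference d the bitstream carries the VLC
 * for d's size category (the number of bits of |d|) from ff_mpeg4_DCtab_lum/chrom,
 * then `size` additional bits, plus a trailing marker bit when size > 8. The
 * tables above store that whole concatenation as a single code at index d + 256,
 * so mpeg4_encode_dc() only needs one put_bits() call per DC difference.
 */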
//unified encoding tables for run length encoding of coefficients
//unified in the sense that the specification specifies the encoding in several steps.
static uint32_t uni_mpeg4_intra_rl_bits[64*64*2*2];
static uint8_t  uni_mpeg4_intra_rl_len [64*64*2*2];
static uint32_t uni_mpeg4_inter_rl_bits[64*64*2*2];
static uint8_t  uni_mpeg4_inter_rl_len [64*64*2*2];

//#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128 + (run)*256 + (level))
//#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run) + (level)*64)
#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run)*128 + (level))
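
/* Layout of the index built above (a reading aid, derived from the macro and its
 * callers, which always pass level + 64): the low 7 bits hold the biased level
 * (0..127), the run (0..63) advances in steps of 128, and bit 13 selects the
 * "last coefficient" half of the table, giving 2*64*128 = 16384 entries in total.
 */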
/* mpeg4
   inter
      max level: 24/6
      max run: 53/63
   intra
      max level: 53/16
      max run: 29/41
 */
/**
 * Return the number of bits needed to encode the given 8x8 block.
 * @param[in] block_last_index last index in scantable order that refers to a non-zero element in block.
 */
static inline int get_block_rate(MpegEncContext * s, DCTELEM block[64], int block_last_index, uint8_t scantable[64]){
    int last=0;
    int j;
    int rate=0;

    for(j=1; j<=block_last_index; j++){
        const int index= scantable[j];
        int level= block[index];
        if(level){
            level+= 64;
            if((level&(~127)) == 0){
                if(j<block_last_index) rate+= s->intra_ac_vlc_length     [UNI_AC_ENC_INDEX(j-last-1, level)];
                else                   rate+= s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j-last-1, level)];
            }else
                rate += s->ac_esc_length;

            last= j;
        }
    }

    return rate;
}
/**
 * Restore the AC coefficients in block that have been changed by decide_ac_pred().
 * This function also restores s->block_last_index.
 * @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir AC prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[in] zigzag_last_index index referring to the last non-zero coefficient in zigzag order
 */
static inline void restore_ac_coeffs(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], const int zigzag_last_index[6])
{
    int i, n;
    memcpy(s->block_last_index, zigzag_last_index, sizeof(int)*6);

    for(n=0; n<6; n++){
        int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

        st[n]= s->intra_scantable.permutated;
        if(dir[n]){
            /* top prediction */
            for(i=1; i<8; i++){
                block[n][s->dsp.idct_permutation[i   ]] = ac_val[i+8];
            }
        }else{
            /* left prediction */
            for(i=1; i<8; i++){
                block[n][s->dsp.idct_permutation[i<<3]]= ac_val[i  ];
            }
        }
    }
}
/**
 * Return the optimal value (0 or 1) for the ac_pred element for the given MB in mpeg4.
 * This function will also update s->block_last_index and s->ac_val.
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir AC prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[out] zigzag_last_index index referring to the last non-zero coefficient in zigzag order
 */
static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const int dir[6], uint8_t *st[6], int zigzag_last_index[6])
{
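    /* How the decision is made (a summary of the loop below, not additional
     * behaviour): 'score' starts at 0, the rate of each block without AC
     * prediction is subtracted and the rate with the predicted (and, on a
     * qscale change, rescaled) coefficients is added back. AC prediction is
     * chosen only if the total score ends up negative, i.e. strictly fewer
     * bits; otherwise the coefficients are put back via restore_ac_coeffs().
     */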
  110. int score= 0;
  111. int i, n;
  112. int8_t * const qscale_table= s->current_picture.qscale_table;
  113. memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);
  114. for(n=0; n<6; n++){
  115. int16_t *ac_val, *ac_val1;
  116. score -= get_block_rate(s, block[n], s->block_last_index[n], s->intra_scantable.permutated);
  117. ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
  118. ac_val1= ac_val;
  119. if(dir[n]){
  120. const int xy= s->mb_x + s->mb_y*s->mb_stride - s->mb_stride;
  121. /* top prediction */
  122. ac_val-= s->block_wrap[n]*16;
  123. if(s->mb_y==0 || s->qscale == qscale_table[xy] || n==2 || n==3){
  124. /* same qscale */
  125. for(i=1; i<8; i++){
  126. const int level= block[n][s->dsp.idct_permutation[i ]];
  127. block[n][s->dsp.idct_permutation[i ]] = level - ac_val[i+8];
  128. ac_val1[i ]= block[n][s->dsp.idct_permutation[i<<3]];
  129. ac_val1[i+8]= level;
  130. }
  131. }else{
  132. /* different qscale, we must rescale */
  133. for(i=1; i<8; i++){
  134. const int level= block[n][s->dsp.idct_permutation[i ]];
  135. block[n][s->dsp.idct_permutation[i ]] = level - ROUNDED_DIV(ac_val[i + 8]*qscale_table[xy], s->qscale);
  136. ac_val1[i ]= block[n][s->dsp.idct_permutation[i<<3]];
  137. ac_val1[i+8]= level;
  138. }
  139. }
  140. st[n]= s->intra_h_scantable.permutated;
  141. }else{
  142. const int xy= s->mb_x-1 + s->mb_y*s->mb_stride;
  143. /* left prediction */
  144. ac_val-= 16;
  145. if(s->mb_x==0 || s->qscale == qscale_table[xy] || n==1 || n==3){
  146. /* same qscale */
  147. for(i=1; i<8; i++){
  148. const int level= block[n][s->dsp.idct_permutation[i<<3]];
  149. block[n][s->dsp.idct_permutation[i<<3]]= level - ac_val[i];
  150. ac_val1[i ]= level;
  151. ac_val1[i+8]= block[n][s->dsp.idct_permutation[i ]];
  152. }
  153. }else{
  154. /* different qscale, we must rescale */
  155. for(i=1; i<8; i++){
  156. const int level= block[n][s->dsp.idct_permutation[i<<3]];
  157. block[n][s->dsp.idct_permutation[i<<3]]= level - ROUNDED_DIV(ac_val[i]*qscale_table[xy], s->qscale);
  158. ac_val1[i ]= level;
  159. ac_val1[i+8]= block[n][s->dsp.idct_permutation[i ]];
  160. }
  161. }
  162. st[n]= s->intra_v_scantable.permutated;
  163. }
  164. for(i=63; i>0; i--) //FIXME optimize
  165. if(block[n][ st[n][i] ]) break;
  166. s->block_last_index[n]= i;
  167. score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
  168. }
  169. if(score < 0){
  170. return 1;
  171. }else{
  172. restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
  173. return 0;
  174. }
  175. }
/**
 * Modify mb_type & qscale so that encoding is actually possible in mpeg4.
 */
  179. void ff_clean_mpeg4_qscales(MpegEncContext *s){
  180. int i;
  181. int8_t * const qscale_table= s->current_picture.qscale_table;
  182. ff_clean_h263_qscales(s);
  183. if(s->pict_type== FF_B_TYPE){
  184. int odd=0;
  185. /* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */
  186. for(i=0; i<s->mb_num; i++){
  187. int mb_xy= s->mb_index2xy[i];
  188. odd += qscale_table[mb_xy]&1;
  189. }
  190. if(2*odd > s->mb_num) odd=1;
  191. else odd=0;
  192. for(i=0; i<s->mb_num; i++){
  193. int mb_xy= s->mb_index2xy[i];
  194. if((qscale_table[mb_xy]&1) != odd)
  195. qscale_table[mb_xy]++;
  196. if(qscale_table[mb_xy] > 31)
  197. qscale_table[mb_xy]= 31;
  198. }
  199. for(i=1; i<s->mb_num; i++){
  200. int mb_xy= s->mb_index2xy[i];
  201. if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_DIRECT)){
  202. s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_BIDIR;
  203. }
  204. }
  205. }
  206. }
/**
 * Encode the DC value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
  211. static inline void mpeg4_encode_dc(PutBitContext * s, int level, int n)
  212. {
  213. #if 1
  214. /* DC will overflow if level is outside the [-255,255] range. */
  215. level+=256;
  216. if (n < 4) {
  217. /* luminance */
  218. put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
  219. } else {
  220. /* chrominance */
  221. put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
  222. }
  223. #else
  224. int size, v;
  225. /* find number of bits */
  226. size = 0;
  227. v = abs(level);
  228. while (v) {
  229. v >>= 1;
  230. size++;
  231. }
  232. if (n < 4) {
  233. /* luminance */
  234. put_bits(&s->pb, ff_mpeg4_DCtab_lum[size][1], ff_mpeg4_DCtab_lum[size][0]);
  235. } else {
  236. /* chrominance */
  237. put_bits(&s->pb, ff_mpeg4_DCtab_chrom[size][1], ff_mpeg4_DCtab_chrom[size][0]);
  238. }
  239. /* encode remaining bits */
  240. if (size > 0) {
  241. if (level < 0)
  242. level = (-level) ^ ((1 << size) - 1);
  243. put_bits(&s->pb, size, level);
  244. if (size > 8)
  245. put_bits(&s->pb, 1, 1);
  246. }
  247. #endif
  248. }
  249. static inline int mpeg4_get_dc_length(int level, int n){
  250. if (n < 4) {
  251. return uni_DCtab_lum_len[level + 256];
  252. } else {
  253. return uni_DCtab_chrom_len[level + 256];
  254. }
  255. }
/**
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
                                      uint8_t *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
{
    int i, last_non_zero;
#if 0 //variables for the commented-out version below
    int code, sign, last;
#endif
    const RLTable *rl;
    uint32_t *bits_tab;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];

    if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away
        /* mpeg4 based DC predictor */
        mpeg4_encode_dc(dc_pb, intra_dc, n);
        if(last_index<1) return;
        i = 1;
        rl = &ff_mpeg4_rl_intra;
        bits_tab= uni_mpeg4_intra_rl_bits;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if(last_index<0) return;
        i = 0;
        rl = &ff_h263_rl_inter;
        bits_tab= uni_mpeg4_inter_rl_bits;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
#if 1
    for (; i < last_index; i++) {
        int level = block[ scan_table[i] ];
        if (level) {
            int run = i - last_non_zero - 1;
            level+=64;
            if((level&(~127)) == 0){
                const int index= UNI_MPEG4_ENC_INDEX(0, run, level);
                put_bits(ac_pb, len_tab[index], bits_tab[index]);
            }else{ //ESC3
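                /* The single put_bits() below packs the whole third-escape code,
                 * reading the constant from left to right: the 7-bit VLC escape
                 * code (value 3), "11" selecting escape mode 3, the 1-bit LAST
                 * flag (0 here, 1 in the copy after the loop), the 6-bit run, a
                 * marker bit, the 12-bit level (the +64 bias removed again and
                 * masked to 12 bits) and a final marker bit - 30 bits in total.
                 */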
                put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(0<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1);
            }
            last_non_zero = i;
        }
    }

    /*if(i<=last_index)*/{
        int level = block[ scan_table[i] ];
        int run = i - last_non_zero - 1;
        level+=64;
        if((level&(~127)) == 0){
            const int index= UNI_MPEG4_ENC_INDEX(1, run, level);
            put_bits(ac_pb, len_tab[index], bits_tab[index]);
        }else{ //ESC3
            put_bits(ac_pb, 7+2+1+6+1+12+1, (3<<23)+(3<<21)+(1<<20)+(run<<14)+(1<<13)+(((level-64)&0xfff)<<1)+1);
        }
    }
  314. #else
  315. for (; i <= last_index; i++) {
  316. const int slevel = block[ scan_table[i] ];
  317. if (slevel) {
  318. int level;
  319. int run = i - last_non_zero - 1;
  320. last = (i == last_index);
  321. sign = 0;
  322. level = slevel;
  323. if (level < 0) {
  324. sign = 1;
  325. level = -level;
  326. }
  327. code = get_rl_index(rl, last, run, level);
  328. put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
  329. if (code == rl->n) {
  330. int level1, run1;
  331. level1 = level - rl->max_level[last][run];
  332. if (level1 < 1)
  333. goto esc2;
  334. code = get_rl_index(rl, last, run, level1);
  335. if (code == rl->n) {
  336. esc2:
  337. put_bits(ac_pb, 1, 1);
  338. if (level > MAX_LEVEL)
  339. goto esc3;
  340. run1 = run - rl->max_run[last][level] - 1;
  341. if (run1 < 0)
  342. goto esc3;
  343. code = get_rl_index(rl, last, run1, level);
  344. if (code == rl->n) {
  345. esc3:
  346. /* third escape */
  347. put_bits(ac_pb, 1, 1);
  348. put_bits(ac_pb, 1, last);
  349. put_bits(ac_pb, 6, run);
  350. put_bits(ac_pb, 1, 1);
  351. put_sbits(ac_pb, 12, slevel);
  352. put_bits(ac_pb, 1, 1);
  353. } else {
  354. /* second escape */
  355. put_bits(ac_pb, 1, 0);
  356. put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
  357. put_bits(ac_pb, 1, sign);
  358. }
  359. } else {
  360. /* first escape */
  361. put_bits(ac_pb, 1, 0);
  362. put_bits(ac_pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
  363. put_bits(ac_pb, 1, sign);
  364. }
  365. } else {
  366. put_bits(ac_pb, 1, sign);
  367. }
  368. last_non_zero = i;
  369. }
  370. }
  371. #endif
  372. }
  373. static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
  374. uint8_t *scan_table)
  375. {
  376. int i, last_non_zero;
  377. uint8_t *len_tab;
  378. const int last_index = s->block_last_index[n];
  379. int len=0;
  380. if (s->mb_intra) { //Note gcc (3.2.1 at least) will optimize this away
  381. /* mpeg4 based DC predictor */
  382. len += mpeg4_get_dc_length(intra_dc, n);
  383. if(last_index<1) return len;
  384. i = 1;
  385. len_tab = uni_mpeg4_intra_rl_len;
  386. } else {
  387. if(last_index<0) return 0;
  388. i = 0;
  389. len_tab = uni_mpeg4_inter_rl_len;
  390. }
  391. /* AC coefs */
  392. last_non_zero = i - 1;
  393. for (; i < last_index; i++) {
  394. int level = block[ scan_table[i] ];
  395. if (level) {
  396. int run = i - last_non_zero - 1;
  397. level+=64;
  398. if((level&(~127)) == 0){
  399. const int index= UNI_MPEG4_ENC_INDEX(0, run, level);
  400. len += len_tab[index];
  401. }else{ //ESC3
  402. len += 7+2+1+6+1+12+1;
  403. }
  404. last_non_zero = i;
  405. }
  406. }
  407. /*if(i<=last_index)*/{
  408. int level = block[ scan_table[i] ];
  409. int run = i - last_non_zero - 1;
  410. level+=64;
  411. if((level&(~127)) == 0){
  412. const int index= UNI_MPEG4_ENC_INDEX(1, run, level);
  413. len += len_tab[index];
  414. }else{ //ESC3
  415. len += 7+2+1+6+1+12+1;
  416. }
  417. }
  418. return len;
  419. }
  420. static inline void mpeg4_encode_blocks(MpegEncContext * s, DCTELEM block[6][64], int intra_dc[6],
  421. uint8_t **scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb){
  422. int i;
  423. if(scan_table){
  424. if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){
  425. for (i = 0; i < 6; i++) {
  426. skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, intra_dc[i], scan_table[i]));
  427. }
  428. }else{
  429. /* encode each block */
  430. for (i = 0; i < 6; i++) {
  431. mpeg4_encode_block(s, block[i], i, intra_dc[i], scan_table[i], dc_pb, ac_pb);
  432. }
  433. }
  434. }else{
  435. if(s->flags2 & CODEC_FLAG2_NO_OUTPUT){
  436. for (i = 0; i < 6; i++) {
  437. skip_put_bits(&s->pb, mpeg4_get_block_length(s, block[i], i, 0, s->intra_scantable.permutated));
  438. }
  439. }else{
  440. /* encode each block */
  441. for (i = 0; i < 6; i++) {
  442. mpeg4_encode_block(s, block[i], i, 0, s->intra_scantable.permutated, dc_pb, ac_pb);
  443. }
  444. }
  445. }
  446. }
//FIXME this is duplicated to h263.c
static const int dquant_code[5]= {1,0,9,2,3};
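/* A note on the table above (as used by mpeg4_encode_mb() below): it maps a
 * dquant of -2..+2, indexed as dquant+2, to the 2-bit dquant code written after
 * MCBPC/CBPY. The middle entry (dquant == 0) is a dummy and is never written,
 * since the code is only emitted when s->dquant is non-zero. */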
  449. void mpeg4_encode_mb(MpegEncContext * s,
  450. DCTELEM block[6][64],
  451. int motion_x, int motion_y)
  452. {
  453. int cbpc, cbpy, pred_x, pred_y;
  454. PutBitContext * const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
  455. PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=FF_B_TYPE ? &s->tex_pb : &s->pb;
  456. PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=FF_I_TYPE ? &s->pb2 : &s->pb;
  457. const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;
  458. if (!s->mb_intra) {
  459. int i, cbp;
  460. if(s->pict_type==FF_B_TYPE){
  461. static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */
  462. int mb_type= mb_type_table[s->mv_dir];
  463. if(s->mb_x==0){
  464. for(i=0; i<2; i++){
  465. s->last_mv[i][0][0]=
  466. s->last_mv[i][0][1]=
  467. s->last_mv[i][1][0]=
  468. s->last_mv[i][1][1]= 0;
  469. }
  470. }
  471. assert(s->dquant>=-2 && s->dquant<=2);
  472. assert((s->dquant&1)==0);
  473. assert(mb_type>=0);
  474. /* nothing to do if this MB was skipped in the next P Frame */
  475. if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ...
  476. s->skip_count++;
  477. s->mv[0][0][0]=
  478. s->mv[0][0][1]=
  479. s->mv[1][0][0]=
  480. s->mv[1][0][1]= 0;
  481. s->mv_dir= MV_DIR_FORWARD; //doesn't matter
  482. s->qscale -= s->dquant;
  483. // s->mb_skipped=1;
  484. return;
  485. }
  486. cbp= get_b_cbp(s, block, motion_x, motion_y, mb_type);
  487. if ((cbp | motion_x | motion_y | mb_type) ==0) {
  488. /* direct MB with MV={0,0} */
  489. assert(s->dquant==0);
  490. put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */
  491. if(interleaved_stats){
  492. s->misc_bits++;
  493. s->last_bits++;
  494. }
  495. s->skip_count++;
  496. return;
  497. }
  498. put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */
  499. put_bits(&s->pb, 1, cbp ? 0 : 1); /* modb2 */ //FIXME merge
  500. put_bits(&s->pb, mb_type+1, 1); // this table is so simple that we don't need it :)
  501. if(cbp) put_bits(&s->pb, 6, cbp);
  502. if(cbp && mb_type){
  503. if(s->dquant)
  504. put_bits(&s->pb, 2, (s->dquant>>2)+3);
  505. else
  506. put_bits(&s->pb, 1, 0);
  507. }else
  508. s->qscale -= s->dquant;
  509. if(!s->progressive_sequence){
  510. if(cbp)
  511. put_bits(&s->pb, 1, s->interlaced_dct);
  512. if(mb_type) // not direct mode
  513. put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
  514. }
  515. if(interleaved_stats){
  516. s->misc_bits+= get_bits_diff(s);
  517. }
  518. if(mb_type == 0){
  519. assert(s->mv_dir & MV_DIRECT);
  520. ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
  521. s->b_count++;
  522. s->f_count++;
  523. }else{
  524. assert(mb_type > 0 && mb_type < 4);
  525. if(s->mv_type != MV_TYPE_FIELD){
  526. if(s->mv_dir & MV_DIR_FORWARD){
  527. ff_h263_encode_motion_vector(s, s->mv[0][0][0] - s->last_mv[0][0][0],
  528. s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
  529. s->last_mv[0][0][0]= s->last_mv[0][1][0]= s->mv[0][0][0];
  530. s->last_mv[0][0][1]= s->last_mv[0][1][1]= s->mv[0][0][1];
  531. s->f_count++;
  532. }
  533. if(s->mv_dir & MV_DIR_BACKWARD){
  534. ff_h263_encode_motion_vector(s, s->mv[1][0][0] - s->last_mv[1][0][0],
  535. s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
  536. s->last_mv[1][0][0]= s->last_mv[1][1][0]= s->mv[1][0][0];
  537. s->last_mv[1][0][1]= s->last_mv[1][1][1]= s->mv[1][0][1];
  538. s->b_count++;
  539. }
  540. }else{
  541. if(s->mv_dir & MV_DIR_FORWARD){
  542. put_bits(&s->pb, 1, s->field_select[0][0]);
  543. put_bits(&s->pb, 1, s->field_select[0][1]);
  544. }
  545. if(s->mv_dir & MV_DIR_BACKWARD){
  546. put_bits(&s->pb, 1, s->field_select[1][0]);
  547. put_bits(&s->pb, 1, s->field_select[1][1]);
  548. }
  549. if(s->mv_dir & MV_DIR_FORWARD){
  550. for(i=0; i<2; i++){
  551. ff_h263_encode_motion_vector(s, s->mv[0][i][0] - s->last_mv[0][i][0] ,
  552. s->mv[0][i][1] - s->last_mv[0][i][1]/2, s->f_code);
  553. s->last_mv[0][i][0]= s->mv[0][i][0];
  554. s->last_mv[0][i][1]= s->mv[0][i][1]*2;
  555. }
  556. s->f_count++;
  557. }
  558. if(s->mv_dir & MV_DIR_BACKWARD){
  559. for(i=0; i<2; i++){
  560. ff_h263_encode_motion_vector(s, s->mv[1][i][0] - s->last_mv[1][i][0] ,
  561. s->mv[1][i][1] - s->last_mv[1][i][1]/2, s->b_code);
  562. s->last_mv[1][i][0]= s->mv[1][i][0];
  563. s->last_mv[1][i][1]= s->mv[1][i][1]*2;
  564. }
  565. s->b_count++;
  566. }
  567. }
  568. }
  569. if(interleaved_stats){
  570. s->mv_bits+= get_bits_diff(s);
  571. }
  572. mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);
  573. if(interleaved_stats){
  574. s->p_tex_bits+= get_bits_diff(s);
  575. }
  576. }else{ /* s->pict_type==FF_B_TYPE */
  577. cbp= get_p_cbp(s, block, motion_x, motion_y);
  578. if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) {
  579. /* check if the B frames can skip it too, as we must skip it if we skip here
  580. why didn't they just compress the skip-mb bits instead of reusing them ?! */
  581. if(s->max_b_frames>0){
  582. int i;
  583. int x,y, offset;
  584. uint8_t *p_pic;
  585. x= s->mb_x*16;
  586. y= s->mb_y*16;
  587. if(x+16 > s->width) x= s->width-16;
  588. if(y+16 > s->height) y= s->height-16;
  589. offset= x + y*s->linesize;
  590. p_pic= s->new_picture.data[0] + offset;
  591. s->mb_skipped=1;
  592. for(i=0; i<s->max_b_frames; i++){
  593. uint8_t *b_pic;
  594. int diff;
  595. Picture *pic= s->reordered_input_picture[i+1];
  596. if(pic==NULL || pic->pict_type!=FF_B_TYPE) break;
  597. b_pic= pic->data[0] + offset;
  598. if(pic->type != FF_BUFFER_TYPE_SHARED)
  599. b_pic+= INPLACE_OFFSET;
  600. diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
  601. if(diff>s->qscale*70){ //FIXME check that 70 is optimal
  602. s->mb_skipped=0;
  603. break;
  604. }
  605. }
  606. }else
  607. s->mb_skipped=1;
  608. if(s->mb_skipped==1){
  609. /* skip macroblock */
  610. put_bits(&s->pb, 1, 1);
  611. if(interleaved_stats){
  612. s->misc_bits++;
  613. s->last_bits++;
  614. }
  615. s->skip_count++;
  616. return;
  617. }
  618. }
  619. put_bits(&s->pb, 1, 0); /* mb coded */
  620. cbpc = cbp & 3;
  621. cbpy = cbp >> 2;
  622. cbpy ^= 0xf;
  623. if(s->mv_type==MV_TYPE_16X16){
  624. if(s->dquant) cbpc+= 8;
  625. put_bits(&s->pb,
  626. ff_h263_inter_MCBPC_bits[cbpc],
  627. ff_h263_inter_MCBPC_code[cbpc]);
  628. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  629. if(s->dquant)
  630. put_bits(pb2, 2, dquant_code[s->dquant+2]);
  631. if(!s->progressive_sequence){
  632. if(cbp)
  633. put_bits(pb2, 1, s->interlaced_dct);
  634. put_bits(pb2, 1, 0);
  635. }
  636. if(interleaved_stats){
  637. s->misc_bits+= get_bits_diff(s);
  638. }
  639. /* motion vectors: 16x16 mode */
  640. h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  641. ff_h263_encode_motion_vector(s, motion_x - pred_x,
  642. motion_y - pred_y, s->f_code);
  643. }else if(s->mv_type==MV_TYPE_FIELD){
  644. if(s->dquant) cbpc+= 8;
  645. put_bits(&s->pb,
  646. ff_h263_inter_MCBPC_bits[cbpc],
  647. ff_h263_inter_MCBPC_code[cbpc]);
  648. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  649. if(s->dquant)
  650. put_bits(pb2, 2, dquant_code[s->dquant+2]);
  651. assert(!s->progressive_sequence);
  652. if(cbp)
  653. put_bits(pb2, 1, s->interlaced_dct);
  654. put_bits(pb2, 1, 1);
  655. if(interleaved_stats){
  656. s->misc_bits+= get_bits_diff(s);
  657. }
  658. /* motion vectors: 16x8 interlaced mode */
  659. h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  660. pred_y /=2;
  661. put_bits(&s->pb, 1, s->field_select[0][0]);
  662. put_bits(&s->pb, 1, s->field_select[0][1]);
  663. ff_h263_encode_motion_vector(s, s->mv[0][0][0] - pred_x,
  664. s->mv[0][0][1] - pred_y, s->f_code);
  665. ff_h263_encode_motion_vector(s, s->mv[0][1][0] - pred_x,
  666. s->mv[0][1][1] - pred_y, s->f_code);
  667. }else{
  668. assert(s->mv_type==MV_TYPE_8X8);
  669. put_bits(&s->pb,
  670. ff_h263_inter_MCBPC_bits[cbpc+16],
  671. ff_h263_inter_MCBPC_code[cbpc+16]);
  672. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  673. if(!s->progressive_sequence){
  674. if(cbp)
  675. put_bits(pb2, 1, s->interlaced_dct);
  676. }
  677. if(interleaved_stats){
  678. s->misc_bits+= get_bits_diff(s);
  679. }
  680. for(i=0; i<4; i++){
  681. /* motion vectors: 8x8 mode*/
  682. h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  683. ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
  684. s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
  685. }
  686. }
  687. if(interleaved_stats){
  688. s->mv_bits+= get_bits_diff(s);
  689. }
  690. mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);
  691. if(interleaved_stats){
  692. s->p_tex_bits+= get_bits_diff(s);
  693. }
  694. s->f_count++;
  695. }
  696. } else {
  697. int cbp;
  698. int dc_diff[6]; //dc values with the dc prediction subtracted
  699. int dir[6]; //prediction direction
  700. int zigzag_last_index[6];
  701. uint8_t *scan_table[6];
  702. int i;
  703. for(i=0; i<6; i++){
  704. dc_diff[i]= ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);
  705. }
  706. if(s->flags & CODEC_FLAG_AC_PRED){
  707. s->ac_pred= decide_ac_pred(s, block, dir, scan_table, zigzag_last_index);
  708. }else{
  709. for(i=0; i<6; i++)
  710. scan_table[i]= s->intra_scantable.permutated;
  711. }
  712. /* compute cbp */
  713. cbp = 0;
  714. for (i = 0; i < 6; i++) {
  715. if (s->block_last_index[i] >= 1)
  716. cbp |= 1 << (5 - i);
  717. }
  718. cbpc = cbp & 3;
  719. if (s->pict_type == FF_I_TYPE) {
  720. if(s->dquant) cbpc+=4;
  721. put_bits(&s->pb,
  722. ff_h263_intra_MCBPC_bits[cbpc],
  723. ff_h263_intra_MCBPC_code[cbpc]);
  724. } else {
  725. if(s->dquant) cbpc+=8;
  726. put_bits(&s->pb, 1, 0); /* mb coded */
  727. put_bits(&s->pb,
  728. ff_h263_inter_MCBPC_bits[cbpc + 4],
  729. ff_h263_inter_MCBPC_code[cbpc + 4]);
  730. }
  731. put_bits(pb2, 1, s->ac_pred);
  732. cbpy = cbp >> 2;
  733. put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]);
  734. if(s->dquant)
  735. put_bits(dc_pb, 2, dquant_code[s->dquant+2]);
  736. if(!s->progressive_sequence){
  737. put_bits(dc_pb, 1, s->interlaced_dct);
  738. }
  739. if(interleaved_stats){
  740. s->misc_bits+= get_bits_diff(s);
  741. }
  742. mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb);
  743. if(interleaved_stats){
  744. s->i_tex_bits+= get_bits_diff(s);
  745. }
  746. s->i_count++;
  747. /* restore ac coeffs & last_index stuff if we messed them up with the prediction */
  748. if(s->ac_pred)
  749. restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index);
  750. }
  751. }
/**
 * Add mpeg4 stuffing bits (01...1).
 */
void ff_mpeg4_stuffing(PutBitContext * pbc)
{
    int length;
    put_bits(pbc, 1, 0);
    length= (-put_bits_count(pbc))&7;
    if(length) put_bits(pbc, length, (1<<length)-1);
}
  762. /* must be called before writing the header */
  763. void ff_set_mpeg4_time(MpegEncContext * s){
  764. if(s->pict_type==FF_B_TYPE){
  765. ff_mpeg4_init_direct_mv(s);
  766. }else{
  767. s->last_time_base= s->time_base;
  768. s->time_base= s->time/s->avctx->time_base.den;
  769. }
  770. }
static void mpeg4_encode_gop_header(MpegEncContext * s){
    int hours, minutes, seconds;
    int64_t time;

    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 16, GOP_STARTCODE);

    time= s->current_picture_ptr->pts;
    if(s->reordered_input_picture[1])
        time= FFMIN(time, s->reordered_input_picture[1]->pts);
    time= time*s->avctx->time_base.num;

    seconds= time/s->avctx->time_base.den;
    minutes= seconds/60; seconds %= 60;
    hours= minutes/60; minutes %= 60;
    hours%=24;
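
    /* time_code, as written below: 5 bits of hours, 6 bits of minutes, a marker
       bit and 6 bits of seconds, followed by the closed-GOP flag and broken_link = 0. */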
    put_bits(&s->pb, 5, hours);
    put_bits(&s->pb, 6, minutes);
    put_bits(&s->pb, 1, 1);
    put_bits(&s->pb, 6, seconds);

    put_bits(&s->pb, 1, !!(s->flags&CODEC_FLAG_CLOSED_GOP));
    put_bits(&s->pb, 1, 0); //broken link == NO

    s->last_time_base= time / s->avctx->time_base.den;

    ff_mpeg4_stuffing(&s->pb);
}
  793. static void mpeg4_encode_visual_object_header(MpegEncContext * s){
  794. int profile_and_level_indication;
  795. int vo_ver_id;
  796. if(s->avctx->profile != FF_PROFILE_UNKNOWN){
  797. profile_and_level_indication = s->avctx->profile << 4;
  798. }else if(s->max_b_frames || s->quarter_sample){
  799. profile_and_level_indication= 0xF0; // adv simple
  800. }else{
  801. profile_and_level_indication= 0x00; // simple
  802. }
  803. if(s->avctx->level != FF_LEVEL_UNKNOWN){
  804. profile_and_level_indication |= s->avctx->level;
  805. }else{
  806. profile_and_level_indication |= 1; //level 1
  807. }
  808. if(profile_and_level_indication>>4 == 0xF){
  809. vo_ver_id= 5;
  810. }else{
  811. vo_ver_id= 1;
  812. }
  813. //FIXME levels
  814. put_bits(&s->pb, 16, 0);
  815. put_bits(&s->pb, 16, VOS_STARTCODE);
  816. put_bits(&s->pb, 8, profile_and_level_indication);
  817. put_bits(&s->pb, 16, 0);
  818. put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE);
  819. put_bits(&s->pb, 1, 1);
  820. put_bits(&s->pb, 4, vo_ver_id);
  821. put_bits(&s->pb, 3, 1); //priority
  822. put_bits(&s->pb, 4, 1); //visual obj type== video obj
  823. put_bits(&s->pb, 1, 0); //video signal type == no clue //FIXME
  824. ff_mpeg4_stuffing(&s->pb);
  825. }
  826. static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number)
  827. {
  828. int vo_ver_id;
  829. if (!CONFIG_MPEG4_ENCODER) return;
  830. if(s->max_b_frames || s->quarter_sample){
  831. vo_ver_id= 5;
  832. s->vo_type= ADV_SIMPLE_VO_TYPE;
  833. }else{
  834. vo_ver_id= 1;
  835. s->vo_type= SIMPLE_VO_TYPE;
  836. }
  837. put_bits(&s->pb, 16, 0);
  838. put_bits(&s->pb, 16, 0x100 + vo_number); /* video obj */
  839. put_bits(&s->pb, 16, 0);
  840. put_bits(&s->pb, 16, 0x120 + vol_number); /* video obj layer */
  841. put_bits(&s->pb, 1, 0); /* random access vol */
  842. put_bits(&s->pb, 8, s->vo_type); /* video obj type indication */
  843. if(s->workaround_bugs & FF_BUG_MS) {
  844. put_bits(&s->pb, 1, 0); /* is obj layer id= no */
  845. } else {
  846. put_bits(&s->pb, 1, 1); /* is obj layer id= yes */
  847. put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */
  848. put_bits(&s->pb, 3, 1); /* is obj layer priority */
  849. }
  850. s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);
  851. put_bits(&s->pb, 4, s->aspect_ratio_info);/* aspect ratio info */
  852. if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){
  853. put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
  854. put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
  855. }
  856. if(s->workaround_bugs & FF_BUG_MS) { //
  857. put_bits(&s->pb, 1, 0); /* vol control parameters= no @@@ */
  858. } else {
  859. put_bits(&s->pb, 1, 1); /* vol control parameters= yes */
  860. put_bits(&s->pb, 2, 1); /* chroma format YUV 420/YV12 */
  861. put_bits(&s->pb, 1, s->low_delay);
  862. put_bits(&s->pb, 1, 0); /* vbv parameters= no */
  863. }
  864. put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */
  865. put_bits(&s->pb, 1, 1); /* marker bit */
  866. put_bits(&s->pb, 16, s->avctx->time_base.den);
  867. if (s->time_increment_bits < 1)
  868. s->time_increment_bits = 1;
  869. put_bits(&s->pb, 1, 1); /* marker bit */
  870. put_bits(&s->pb, 1, 0); /* fixed vop rate=no */
  871. put_bits(&s->pb, 1, 1); /* marker bit */
  872. put_bits(&s->pb, 13, s->width); /* vol width */
  873. put_bits(&s->pb, 1, 1); /* marker bit */
  874. put_bits(&s->pb, 13, s->height); /* vol height */
  875. put_bits(&s->pb, 1, 1); /* marker bit */
  876. put_bits(&s->pb, 1, s->progressive_sequence ? 0 : 1);
  877. put_bits(&s->pb, 1, 1); /* obmc disable */
  878. if (vo_ver_id == 1) {
  879. put_bits(&s->pb, 1, s->vol_sprite_usage); /* sprite enable */
  880. }else{
  881. put_bits(&s->pb, 2, s->vol_sprite_usage); /* sprite enable */
  882. }
  883. put_bits(&s->pb, 1, 0); /* not 8 bit == false */
  884. put_bits(&s->pb, 1, s->mpeg_quant); /* quant type= (0=h263 style)*/
  885. if(s->mpeg_quant){
  886. ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
  887. ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
  888. }
  889. if (vo_ver_id != 1)
  890. put_bits(&s->pb, 1, s->quarter_sample);
  891. put_bits(&s->pb, 1, 1); /* complexity estimation disable */
  892. s->resync_marker= s->rtp_mode;
  893. put_bits(&s->pb, 1, s->resync_marker ? 0 : 1);/* resync marker disable */
  894. put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0);
  895. if(s->data_partitioning){
  896. put_bits(&s->pb, 1, 0); /* no rvlc */
  897. }
  898. if (vo_ver_id != 1){
  899. put_bits(&s->pb, 1, 0); /* newpred */
  900. put_bits(&s->pb, 1, 0); /* reduced res vop */
  901. }
  902. put_bits(&s->pb, 1, 0); /* scalability */
  903. ff_mpeg4_stuffing(&s->pb);
  904. /* user data */
  905. if(!(s->flags & CODEC_FLAG_BITEXACT)){
  906. put_bits(&s->pb, 16, 0);
  907. put_bits(&s->pb, 16, 0x1B2); /* user_data */
  908. ff_put_string(&s->pb, LIBAVCODEC_IDENT, 0);
  909. }
  910. }
  911. /* write mpeg4 VOP header */
  912. void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
  913. {
  914. int time_incr;
  915. int time_div, time_mod;
  916. if(s->pict_type==FF_I_TYPE){
  917. if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){
  918. if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy
  919. mpeg4_encode_visual_object_header(s);
  920. if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number==0) //HACK, the reference sw is buggy
  921. mpeg4_encode_vol_header(s, 0, 0);
  922. }
  923. if(!(s->workaround_bugs & FF_BUG_MS))
  924. mpeg4_encode_gop_header(s);
  925. }
  926. s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE;
  927. put_bits(&s->pb, 16, 0); /* vop header */
  928. put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */
  929. put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */
  930. assert(s->time>=0);
  931. time_div= s->time/s->avctx->time_base.den;
  932. time_mod= s->time%s->avctx->time_base.den;
  933. time_incr= time_div - s->last_time_base;
  934. assert(time_incr >= 0);
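    /* What the next few lines write: modulo_time_base, i.e. one '1' bit for every
       full second elapsed since the last time base, closed by a '0'; then the
       sub-second remainder time_mod as the vop_time_increment, time_increment_bits
       wide, between two marker bits. */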
  935. while(time_incr--)
  936. put_bits(&s->pb, 1, 1);
  937. put_bits(&s->pb, 1, 0);
  938. put_bits(&s->pb, 1, 1); /* marker */
  939. put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
  940. put_bits(&s->pb, 1, 1); /* marker */
  941. put_bits(&s->pb, 1, 1); /* vop coded */
  942. if ( s->pict_type == FF_P_TYPE
  943. || (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) {
  944. put_bits(&s->pb, 1, s->no_rounding); /* rounding type */
  945. }
  946. put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
  947. if(!s->progressive_sequence){
  948. put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
  949. put_bits(&s->pb, 1, s->alternate_scan);
  950. }
  951. //FIXME sprite stuff
  952. put_bits(&s->pb, 5, s->qscale);
  953. if (s->pict_type != FF_I_TYPE)
  954. put_bits(&s->pb, 3, s->f_code); /* fcode_for */
  955. if (s->pict_type == FF_B_TYPE)
  956. put_bits(&s->pb, 3, s->b_code); /* fcode_back */
  957. }
  958. static void init_uni_dc_tab(void)
  959. {
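    /* What the loop below builds: for every DC difference in [-256,255] the
       size-category VLC from ff_mpeg4_DCtab_lum/chrom, the `size` additional
       bits and, for size > 8, the extra marker bit are concatenated into one
       code and stored at index level+256, so that mpeg4_encode_dc() above can
       write the whole thing with a single put_bits() call. */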
  960. int level, uni_code, uni_len;
  961. for(level=-256; level<256; level++){
  962. int size, v, l;
  963. /* find number of bits */
  964. size = 0;
  965. v = abs(level);
  966. while (v) {
  967. v >>= 1;
  968. size++;
  969. }
  970. if (level < 0)
  971. l= (-level) ^ ((1 << size) - 1);
  972. else
  973. l= level;
  974. /* luminance */
  975. uni_code= ff_mpeg4_DCtab_lum[size][0];
  976. uni_len = ff_mpeg4_DCtab_lum[size][1];
  977. if (size > 0) {
  978. uni_code<<=size; uni_code|=l;
  979. uni_len+=size;
  980. if (size > 8){
  981. uni_code<<=1; uni_code|=1;
  982. uni_len++;
  983. }
  984. }
  985. uni_DCtab_lum_bits[level+256]= uni_code;
  986. uni_DCtab_lum_len [level+256]= uni_len;
  987. /* chrominance */
  988. uni_code= ff_mpeg4_DCtab_chrom[size][0];
  989. uni_len = ff_mpeg4_DCtab_chrom[size][1];
  990. if (size > 0) {
  991. uni_code<<=size; uni_code|=l;
  992. uni_len+=size;
  993. if (size > 8){
  994. uni_code<<=1; uni_code|=1;
  995. uni_len++;
  996. }
  997. }
  998. uni_DCtab_chrom_bits[level+256]= uni_code;
  999. uni_DCtab_chrom_len [level+256]= uni_len;
  1000. }
  1001. }
  1002. static void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab){
  1003. int slevel, run, last;
  1004. assert(MAX_LEVEL >= 64);
  1005. assert(MAX_RUN >= 63);
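    /* What the loops below do: for every (last, run, level) combination they try
       the plain VLC (plus sign bit) and the three escape modes, and keep whichever
       encoding is shortest. len_tab entries start out at 100, which acts as
       "no code yet". ESC1 re-codes with a level offset, ESC2 with a run offset,
       and ESC3 is the fixed 30-bit form also written directly by
       mpeg4_encode_block(). */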
  1006. for(slevel=-64; slevel<64; slevel++){
  1007. if(slevel==0) continue;
  1008. for(run=0; run<64; run++){
  1009. for(last=0; last<=1; last++){
  1010. const int index= UNI_MPEG4_ENC_INDEX(last, run, slevel+64);
  1011. int level= slevel < 0 ? -slevel : slevel;
  1012. int sign= slevel < 0 ? 1 : 0;
  1013. int bits, len, code;
  1014. int level1, run1;
  1015. len_tab[index]= 100;
  1016. /* ESC0 */
  1017. code= get_rl_index(rl, last, run, level);
  1018. bits= rl->table_vlc[code][0];
  1019. len= rl->table_vlc[code][1];
  1020. bits=bits*2+sign; len++;
  1021. if(code!=rl->n && len < len_tab[index]){
  1022. bits_tab[index]= bits;
  1023. len_tab [index]= len;
  1024. }
  1025. /* ESC1 */
  1026. bits= rl->table_vlc[rl->n][0];
  1027. len= rl->table_vlc[rl->n][1];
  1028. bits=bits*2; len++; //esc1
  1029. level1= level - rl->max_level[last][run];
  1030. if(level1>0){
  1031. code= get_rl_index(rl, last, run, level1);
  1032. bits<<= rl->table_vlc[code][1];
  1033. len += rl->table_vlc[code][1];
  1034. bits += rl->table_vlc[code][0];
  1035. bits=bits*2+sign; len++;
  1036. if(code!=rl->n && len < len_tab[index]){
  1037. bits_tab[index]= bits;
  1038. len_tab [index]= len;
  1039. }
  1040. }
  1041. /* ESC2 */
  1042. bits= rl->table_vlc[rl->n][0];
  1043. len= rl->table_vlc[rl->n][1];
  1044. bits=bits*4+2; len+=2; //esc2
  1045. run1 = run - rl->max_run[last][level] - 1;
  1046. if(run1>=0){
  1047. code= get_rl_index(rl, last, run1, level);
  1048. bits<<= rl->table_vlc[code][1];
  1049. len += rl->table_vlc[code][1];
  1050. bits += rl->table_vlc[code][0];
  1051. bits=bits*2+sign; len++;
  1052. if(code!=rl->n && len < len_tab[index]){
  1053. bits_tab[index]= bits;
  1054. len_tab [index]= len;
  1055. }
  1056. }
  1057. /* ESC3 */
  1058. bits= rl->table_vlc[rl->n][0];
  1059. len = rl->table_vlc[rl->n][1];
  1060. bits=bits*4+3; len+=2; //esc3
  1061. bits=bits*2+last; len++;
  1062. bits=bits*64+run; len+=6;
  1063. bits=bits*2+1; len++; //marker
  1064. bits=bits*4096+(slevel&0xfff); len+=12;
  1065. bits=bits*2+1; len++; //marker
  1066. if(len < len_tab[index]){
  1067. bits_tab[index]= bits;
  1068. len_tab [index]= len;
  1069. }
  1070. }
  1071. }
  1072. }
  1073. }
  1074. static av_cold int encode_init(AVCodecContext *avctx)
  1075. {
  1076. MpegEncContext *s = avctx->priv_data;
  1077. int ret;
  1078. static int done = 0;
  1079. if((ret=MPV_encode_init(avctx)) < 0)
  1080. return ret;
  1081. if (!done) {
  1082. done = 1;
  1083. init_uni_dc_tab();
  1084. init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
  1085. init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
  1086. init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
  1087. }
  1088. s->min_qcoeff= -2048;
  1089. s->max_qcoeff= 2047;
  1090. s->intra_ac_vlc_length = uni_mpeg4_intra_rl_len;
  1091. s->intra_ac_vlc_last_length= uni_mpeg4_intra_rl_len + 128*64;
  1092. s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len;
  1093. s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64;
  1094. s->luma_dc_vlc_length= uni_DCtab_lum_len;
  1095. s->chroma_dc_vlc_length= uni_DCtab_chrom_len;
  1096. s->ac_esc_length= 7+2+1+6+1+12+1;
  1097. s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table;
  1098. s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;
  1099. if(s->flags & CODEC_FLAG_GLOBAL_HEADER){
  1100. s->avctx->extradata= av_malloc(1024);
  1101. init_put_bits(&s->pb, s->avctx->extradata, 1024);
  1102. if(!(s->workaround_bugs & FF_BUG_MS))
  1103. mpeg4_encode_visual_object_header(s);
  1104. mpeg4_encode_vol_header(s, 0, 0);
  1105. // ff_mpeg4_stuffing(&s->pb); ?
  1106. flush_put_bits(&s->pb);
  1107. s->avctx->extradata_size= (put_bits_count(&s->pb)+7)>>3;
  1108. }
  1109. return 0;
  1110. }
void ff_mpeg4_init_partitions(MpegEncContext *s)
{
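    /* Summary of the code below and of ff_mpeg4_merge_partitions(): the remaining
       output buffer is carved into three roughly equal, 4-byte-aligned pieces;
       s->pb keeps the first, s->tex_pb and s->pb2 write into the other two, and
       the merge step later concatenates them again with the DC/motion marker
       in between. */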
    uint8_t *start= put_bits_ptr(&s->pb);
    uint8_t *end= s->pb.buf_end;
    int size= end - start;
    int pb_size = (((intptr_t)start + size/3)&(~3)) - (intptr_t)start;
    int tex_size= (size - 2*pb_size)&(~3);

    set_put_bits_buffer_size(&s->pb, pb_size);
    init_put_bits(&s->tex_pb, start + pb_size           , tex_size);
    init_put_bits(&s->pb2   , start + pb_size + tex_size, pb_size);
}
  1122. void ff_mpeg4_merge_partitions(MpegEncContext *s)
  1123. {
  1124. const int pb2_len = put_bits_count(&s->pb2 );
  1125. const int tex_pb_len= put_bits_count(&s->tex_pb);
  1126. const int bits= put_bits_count(&s->pb);
  1127. if(s->pict_type==FF_I_TYPE){
  1128. put_bits(&s->pb, 19, DC_MARKER);
  1129. s->misc_bits+=19 + pb2_len + bits - s->last_bits;
  1130. s->i_tex_bits+= tex_pb_len;
  1131. }else{
  1132. put_bits(&s->pb, 17, MOTION_MARKER);
  1133. s->misc_bits+=17 + pb2_len;
  1134. s->mv_bits+= bits - s->last_bits;
  1135. s->p_tex_bits+= tex_pb_len;
  1136. }
  1137. flush_put_bits(&s->pb2);
  1138. flush_put_bits(&s->tex_pb);
  1139. set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf);
  1140. ff_copy_bits(&s->pb, s->pb2.buf , pb2_len);
  1141. ff_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len);
  1142. s->last_bits= put_bits_count(&s->pb);
  1143. }
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
{
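    /* What the put_bits() calls below write: the resync marker (a zero run whose
       length ff_mpeg4_get_video_packet_prefix_length() picks, terminated by a
       single '1' bit), the macroblock number, the current quantiser, and
       header_extension_code = 0. */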
    int mb_num_bits= av_log2(s->mb_num - 1) + 1;

    put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0);
    put_bits(&s->pb, 1, 1);

    put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y*s->mb_width);
    put_bits(&s->pb, s->quant_precision, s->qscale);
    put_bits(&s->pb, 1, 0); /* no HEC */
}
AVCodec mpeg4_encoder = {
    "mpeg4",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG4,
    sizeof(MpegEncContext),
    encode_init,
    MPV_encode_picture,
    MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .capabilities= CODEC_CAP_DELAY,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
};