/*
 * MPEG1/2 encoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * MPEG1/2 encoder
 */

#include "avcodec.h"
#include "mathops.h"
#include "mpegvideo.h"

#include "mpeg12.h"
#include "mpeg12data.h"
#include "bytestream.h"
#include "libavutil/attributes.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"

static const uint8_t inv_non_linear_qscale[13] = {
    0, 2, 4, 6, 8,
    9, 10, 11, 12, 13, 14, 15, 16,
};

static const uint8_t svcd_scan_offset_placeholder[14] = {
    0x10, 0x0E,
    0x00, 0x80, 0x81,
    0x00, 0x80, 0x81,
    0xff, 0xff, 0xff,
    0xff, 0xff, 0xff,
};

static void mpeg1_encode_block(MpegEncContext *s,
                               int16_t *block,
                               int component);
static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code); // RAL: f_code parameter added

static uint8_t mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
static uint8_t fcode_tab[MAX_MV * 2 + 1];

static uint8_t uni_mpeg1_ac_vlc_len[64 * 64 * 2];
static uint8_t uni_mpeg2_ac_vlc_len[64 * 64 * 2];

/* simple "include everything" tables for DC: the low byte holds the number
 * of bits, the next 3 bytes hold the code */
static uint32_t mpeg1_lum_dc_uni[512];
static uint32_t mpeg1_chr_dc_uni[512];

static uint8_t mpeg1_index_run[2][64];
static int8_t  mpeg1_max_level[2][64];

static av_cold void init_uni_ac_vlc(RLTable *rl, uint8_t *uni_ac_vlc_len)
{
    int i;

    for (i = 0; i < 128; i++) {
        int level = i - 64;
        int run;

        if (!level)
            continue;

        for (run = 0; run < 64; run++) {
            int len, code;
            int alevel = FFABS(level);

            if (alevel > rl->max_level[0][run])
                code = 111;                         /* rl->n */
            else
                code = rl->index_run[0][run] + alevel - 1;

            if (code < 111 /* rl->n */) {
                /* length of vlc and sign */
                len = rl->table_vlc[code][1] + 1;
            } else {
                len = rl->table_vlc[111 /* rl->n */][1] + 6;
                if (alevel < 128) {
                    len += 8;
                } else {
                    len += 16;
                }
            }
            uni_ac_vlc_len[UNI_AC_ENC_INDEX(run, i)] = len;
        }
    }
}
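
/* Search ff_mpeg12_frame_rate_tab for a frame_rate_code matching
 * avctx->time_base; the closest index is recorded in s->frame_rate_index and
 * -1 is returned unless the match is exact. Entries above index 8 are only
 * considered when strict_std_compliance allows unofficial extensions. */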
static int find_frame_rate_index(MpegEncContext *s)
{
    int i;
    int64_t dmin = INT64_MAX;
    int64_t d;

    for (i = 1; i < 14; i++) {
        int64_t n0 = 1001LL / ff_mpeg12_frame_rate_tab[i].den * ff_mpeg12_frame_rate_tab[i].num * s->avctx->time_base.num;
        int64_t n1 = 1001LL * s->avctx->time_base.den;
        if (s->avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL && i >= 9)
            break;

        d = FFABS(n0 - n1);
        if (d < dmin) {
            dmin = d;
            s->frame_rate_index = i;
        }
    }
    if (dmin)
        return -1;
    else
        return 0;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    if (ff_MPV_encode_init(avctx) < 0)
        return -1;

    if (find_frame_rate_index(s) < 0) {
        if (s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
            av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n",
                   avctx->time_base.den, avctx->time_base.num);
            return -1;
        } else {
            av_log(avctx, AV_LOG_INFO, "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n",
                   avctx->time_base.den, avctx->time_base.num);
        }
    }

    if (avctx->profile == FF_PROFILE_UNKNOWN) {
        if (avctx->level != FF_LEVEL_UNKNOWN) {
            av_log(avctx, AV_LOG_ERROR, "Set profile and level\n");
            return -1;
        }
        avctx->profile = s->chroma_format == CHROMA_420 ? 4 : 0; /* Main or 4:2:2 */
    }

    if (avctx->level == FF_LEVEL_UNKNOWN) {
        if (avctx->profile == 0) { /* 4:2:2 */
            if (avctx->width <= 720 && avctx->height <= 608)
                avctx->level = 5;                   /* Main */
            else
                avctx->level = 2;                   /* High */
        } else {
            if (avctx->profile != 1 && s->chroma_format != CHROMA_420) {
                av_log(avctx, AV_LOG_ERROR, "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n");
                return -1;
            }
            if (avctx->width <= 720 && avctx->height <= 576)
                avctx->level = 8;                   /* Main */
            else if (avctx->width <= 1440)
                avctx->level = 6;                   /* High 1440 */
            else
                avctx->level = 4;                   /* High */
        }
    }

    if (s->drop_frame_timecode && s->frame_rate_index != 4) {
        av_log(avctx, AV_LOG_ERROR, "Drop frame time code only allowed with 1001/30000 fps\n");
        return -1;
    }

    return 0;
}

static void put_header(MpegEncContext *s, int header)
{
    avpriv_align_put_bits(&s->pb);
    put_bits(&s->pb, 16, header >> 16);
    put_sbits(&s->pb, 16, header);
}

/* put sequence header if needed */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
    unsigned int vbv_buffer_size;
    unsigned int fps, v;
    int i;
    uint64_t time_code;
    float best_aspect_error = 1E10;
    float aspect_ratio = av_q2d(s->avctx->sample_aspect_ratio);
    int constraint_parameter_flag;

    if (aspect_ratio == 0.0)
        aspect_ratio = 1.0;                         // pixel aspect 1:1 (VGA)

    if (s->current_picture.f.key_frame) {
        AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];

        /* mpeg1 header repeated every gop */
        put_header(s, SEQ_START_CODE);

        put_sbits(&s->pb, 12, s->width);
        put_sbits(&s->pb, 12, s->height);

        for (i = 1; i < 15; i++) {
            float error = aspect_ratio;
            if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1)
                error -= 1.0 / ff_mpeg1_aspect[i];
            else
                error -= av_q2d(ff_mpeg2_aspect[i]) * s->height / s->width;

            error = FFABS(error);

            if (error < best_aspect_error) {
                best_aspect_error = error;
                s->aspect_ratio_info = i;
            }
        }

        put_bits(&s->pb, 4, s->aspect_ratio_info);
        put_bits(&s->pb, 4, s->frame_rate_index);

        if (s->avctx->rc_max_rate) {
            v = (s->avctx->rc_max_rate + 399) / 400;
            if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
                v = 0x3ffff;
        } else {
            v = 0x3FFFF;
        }

        if (s->avctx->rc_buffer_size)
            vbv_buffer_size = s->avctx->rc_buffer_size;
        else
            /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */
            vbv_buffer_size = ((20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
        vbv_buffer_size = (vbv_buffer_size + 16383) / 16384;

        put_sbits(&s->pb, 18, v);
        put_bits(&s->pb, 1, 1);                     /* marker */
        put_sbits(&s->pb, 10, vbv_buffer_size);

        constraint_parameter_flag =
            s->width <= 768 && s->height <= 576 &&
            s->mb_width * s->mb_height <= 396 &&
            s->mb_width * s->mb_height * framerate.num <= framerate.den * 396 * 25 &&
            framerate.num <= framerate.den * 30 &&
            s->avctx->me_range && s->avctx->me_range < 128 &&
            vbv_buffer_size <= 20 &&
            v <= 1856000 / 400 &&
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO;

        put_bits(&s->pb, 1, constraint_parameter_flag);

        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

        if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            put_header(s, EXT_START_CODE);
            put_bits(&s->pb, 4, 1);                         // seq ext
            put_bits(&s->pb, 1, s->avctx->profile == 0);    // escape bit: 1 for the 4:2:2 profile
            put_bits(&s->pb, 3, s->avctx->profile);         // profile
            put_bits(&s->pb, 4, s->avctx->level);           // level
            put_bits(&s->pb, 1, s->progressive_sequence);
            put_bits(&s->pb, 2, s->chroma_format);
            put_bits(&s->pb, 2, s->width  >> 12);
            put_bits(&s->pb, 2, s->height >> 12);
            put_bits(&s->pb, 12, v >> 18);                  // bitrate ext
            put_bits(&s->pb, 1, 1);                         // marker
            put_bits(&s->pb, 8, vbv_buffer_size >> 10);     // vbv buffer ext
            put_bits(&s->pb, 1, s->low_delay);
            put_bits(&s->pb, 2, 0);                         // frame_rate_ext_n
            put_bits(&s->pb, 5, 0);                         // frame_rate_ext_d
        }

        put_header(s, GOP_START_CODE);
        put_bits(&s->pb, 1, s->drop_frame_timecode);        /* drop frame flag */
        /* time code: we must convert from the real frame rate to a
         * fake MPEG frame rate in case of low frame rate */
        fps = (framerate.num + framerate.den / 2) / framerate.den;
        time_code = s->current_picture_ptr->f.coded_picture_number + s->avctx->timecode_frame_start;

        s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
        if (s->drop_frame_timecode) {
            /* only works for NTSC 29.97 */
            int d = time_code / 17982;
            int m = time_code % 17982;
            //if (m < 2) m += 2; /* not needed since -2,-1 / 1798 in C returns 0 */
            time_code += 18 * d + 2 * ((m - 2) / 1798);
        }
        put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24));
        put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60));
        put_bits(&s->pb, 1, 1);
        put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60));
        put_bits(&s->pb, 6, (uint32_t)((time_code % fps)));
        put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP));
        put_bits(&s->pb, 1, 0);                     /* broken link */
    }
}

static inline void encode_mb_skip_run(MpegEncContext *s, int run)
{
    while (run >= 33) {
        put_bits(&s->pb, 11, 0x008);
        run -= 33;
    }
    put_bits(&s->pb, ff_mpeg12_mbAddrIncrTable[run][1],
             ff_mpeg12_mbAddrIncrTable[run][0]);
}

static av_always_inline void put_qscale(MpegEncContext *s)
{
    if (s->q_scale_type) {
        assert(s->qscale >= 1 && s->qscale <= 12);
        put_bits(&s->pb, 5, inv_non_linear_qscale[s->qscale]);
    } else {
        put_bits(&s->pb, 5, s->qscale);
    }
}

void ff_mpeg1_encode_slice_header(MpegEncContext *s)
{
    if (s->height > 2800) {
        put_header(s, SLICE_MIN_START_CODE + (s->mb_y & 127));
        put_bits(&s->pb, 3, s->mb_y >> 7);          /* slice_vertical_position_extension */
    } else {
        put_header(s, SLICE_MIN_START_CODE + s->mb_y);
    }
    put_qscale(s);
    put_bits(&s->pb, 1, 0);                         /* slice extra information */
}

void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
{
    mpeg1_encode_sequence_header(s);

    /* mpeg1 picture header */
    put_header(s, PICTURE_START_CODE);
    /* temporal reference */

    // RAL: s->picture_number instead of s->fake_picture_number
    put_bits(&s->pb, 10,
             (s->picture_number - s->gop_picture_number) & 0x3ff);
    put_bits(&s->pb, 3, s->pict_type);

    s->vbv_delay_ptr = s->pb.buf + put_bits_count(&s->pb) / 8;
    put_bits(&s->pb, 16, 0xFFFF);                   /* vbv_delay */

    // RAL: Forward f_code also needed for B frames
    if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
        put_bits(&s->pb, 1, 0);                     /* half pel coordinates */
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
            put_bits(&s->pb, 3, s->f_code);         /* forward_f_code */
        else
            put_bits(&s->pb, 3, 7);                 /* forward_f_code */
    }

    // RAL: Backward f_code necessary for B frames
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        put_bits(&s->pb, 1, 0);                     /* half pel coordinates */
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
            put_bits(&s->pb, 3, s->b_code);         /* backward_f_code */
        else
            put_bits(&s->pb, 3, 7);                 /* backward_f_code */
    }

    put_bits(&s->pb, 1, 0);                         /* extra bit picture */

    s->frame_pred_frame_dct = 1;
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        put_header(s, EXT_START_CODE);
        put_bits(&s->pb, 4, 8);                     // pic ext
        if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
            put_bits(&s->pb, 4, s->f_code);
            put_bits(&s->pb, 4, s->f_code);
        } else {
            put_bits(&s->pb, 8, 255);
        }
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            put_bits(&s->pb, 4, s->b_code);
            put_bits(&s->pb, 4, s->b_code);
        } else {
            put_bits(&s->pb, 8, 255);
        }
        put_bits(&s->pb, 2, s->intra_dc_precision);

        assert(s->picture_structure == PICT_FRAME);
        put_bits(&s->pb, 2, s->picture_structure);
        if (s->progressive_sequence) {
            put_bits(&s->pb, 1, 0);                 /* no repeat */
        } else {
            put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
        }
        /* XXX: optimize the generation of this flag with entropy measures */
        s->frame_pred_frame_dct = s->progressive_sequence;

        put_bits(&s->pb, 1, s->frame_pred_frame_dct);
        put_bits(&s->pb, 1, s->concealment_motion_vectors);
        put_bits(&s->pb, 1, s->q_scale_type);
        put_bits(&s->pb, 1, s->intra_vlc_format);
        put_bits(&s->pb, 1, s->alternate_scan);
        put_bits(&s->pb, 1, s->repeat_first_field);
        s->progressive_frame = s->progressive_sequence;
        /* chroma_420_type */
        put_bits(&s->pb, 1, s->chroma_format == CHROMA_420 ? s->progressive_frame : 0);
        put_bits(&s->pb, 1, s->progressive_frame);
        put_bits(&s->pb, 1, 0);                     // composite_display_flag
    }
    if (s->scan_offset) {
        int i;

        put_header(s, USER_START_CODE);
        for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++) {
            put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
        }
    }

    s->mb_y = 0;
    ff_mpeg1_encode_slice_header(s);
}
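
/* Write the macroblock_type VLC supplied by the caller; when
 * frame_pred_frame_dct is not in use, this is followed by the 2-bit
 * frame/field motion_type (only if the macroblock carries motion vectors)
 * and the dct_type (interlaced_dct) flag. */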
static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
                                int has_mv, int field_motion)
{
    put_bits(&s->pb, n, bits);
    if (!s->frame_pred_frame_dct) {
        if (has_mv)
            put_bits(&s->pb, 2, 2 - field_motion);  /* motion_type: frame/field */
        put_bits(&s->pb, 1, s->interlaced_dct);
    }
}

static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
                                                      int16_t block[6][64],
                                                      int motion_x, int motion_y,
                                                      int mb_block_count)
{
    int i, cbp;
    const int mb_x     = s->mb_x;
    const int mb_y     = s->mb_y;
    const int first_mb = mb_x == s->resync_mb_x && mb_y == s->resync_mb_y;

    /* compute cbp */
    cbp = 0;
    for (i = 0; i < mb_block_count; i++) {
        if (s->block_last_index[i] >= 0)
            cbp |= 1 << (mb_block_count - 1 - i);
    }

    if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
        (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)) &&
        ((s->pict_type == AV_PICTURE_TYPE_P && (motion_x | motion_y) == 0) ||
         (s->pict_type == AV_PICTURE_TYPE_B && s->mv_dir == s->last_mv_dir &&
          (((s->mv_dir & MV_DIR_FORWARD)  ? ((s->mv[0][0][0] - s->last_mv[0][0][0]) | (s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
           ((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0]) | (s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
        s->mb_skip_run++;
        s->qscale -= s->dquant;
        s->skip_count++;
        s->misc_bits++;
        s->last_bits++;
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            s->last_mv[0][1][0] = s->last_mv[0][0][0] =
            s->last_mv[0][1][1] = s->last_mv[0][0][1] = 0;
        }
    } else {
        if (first_mb) {
            assert(s->mb_skip_run == 0);
            encode_mb_skip_run(s, s->mb_x);
        } else {
            encode_mb_skip_run(s, s->mb_skip_run);
        }

        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if (s->dquant && cbp) {
                put_mb_modes(s, 2, 1, 0, 0);        /* macroblock_type: macroblock_quant = 1 */
                put_qscale(s);
            } else {
                put_mb_modes(s, 1, 1, 0, 0);        /* macroblock_type: macroblock_quant = 0 */
                s->qscale -= s->dquant;
            }
            s->misc_bits += get_bits_diff(s);
            s->i_count++;
        } else if (s->mb_intra) {
            if (s->dquant && cbp) {
                put_mb_modes(s, 6, 0x01, 0, 0);
                put_qscale(s);
            } else {
                put_mb_modes(s, 5, 0x03, 0, 0);
                s->qscale -= s->dquant;
            }
            s->misc_bits += get_bits_diff(s);
            s->i_count++;
            memset(s->last_mv, 0, sizeof(s->last_mv));
        } else if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (s->mv_type == MV_TYPE_16X16) {
                if (cbp != 0) {
                    if ((motion_x | motion_y) == 0) {
                        if (s->dquant) {
                            put_mb_modes(s, 5, 1, 0, 0);    /* macroblock_pattern & quant */
                            put_qscale(s);
                        } else {
                            put_mb_modes(s, 2, 1, 0, 0);    /* macroblock_pattern only */
                        }
                        s->misc_bits += get_bits_diff(s);
                    } else {
                        if (s->dquant) {
                            put_mb_modes(s, 5, 2, 1, 0);    /* motion + cbp */
                            put_qscale(s);
                        } else {
                            put_mb_modes(s, 1, 1, 1, 0);    /* motion + cbp */
                        }
                        s->misc_bits += get_bits_diff(s);
                        // RAL: f_code parameter added
                        mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code);
                        mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code);
                        s->mv_bits += get_bits_diff(s);
                    }
                } else {
                    put_bits(&s->pb, 3, 1);                 /* motion only */
                    if (!s->frame_pred_frame_dct)
                        put_bits(&s->pb, 2, 2);             /* motion_type: frame */
                    s->misc_bits += get_bits_diff(s);
                    // RAL: f_code parameter added
                    mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code);
                    mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code);
                    s->qscale  -= s->dquant;
                    s->mv_bits += get_bits_diff(s);
                }
                s->last_mv[0][1][0] = s->last_mv[0][0][0] = motion_x;
                s->last_mv[0][1][1] = s->last_mv[0][0][1] = motion_y;
            } else {
                assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD);

                if (cbp) {
                    if (s->dquant) {
                        put_mb_modes(s, 5, 2, 1, 1);        /* motion + cbp */
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 1, 1, 1, 1);        /* motion + cbp */
                    }
                } else {
                    put_bits(&s->pb, 3, 1);                 /* motion only */
                    put_bits(&s->pb, 2, 1);                 /* motion_type: field */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                for (i = 0; i < 2; i++) {
                    put_bits(&s->pb, 1, s->field_select[0][i]);
                    mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0], s->f_code);
                    mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1), s->f_code);
                    s->last_mv[0][i][0] = s->mv[0][i][0];
                    s->last_mv[0][i][1] = 2 * s->mv[0][i][1];
                }
                s->mv_bits += get_bits_diff(s);
            }
            if (cbp) {
                if (s->chroma_y_shift) {
                    put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp][1], ff_mpeg12_mbPatTable[cbp][0]);
                } else {
                    put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp >> 2][1], ff_mpeg12_mbPatTable[cbp >> 2][0]);
                    put_sbits(&s->pb, 2, cbp);
                }
            }
            s->f_count++;
        } else {
            if (s->mv_type == MV_TYPE_16X16) {
                if (cbp) {                                  // With coded block pattern
                    if (s->dquant) {
                        if (s->mv_dir == MV_DIR_FORWARD)
                            put_mb_modes(s, 6, 3, 1, 0);
                        else
                            put_mb_modes(s, 8 - s->mv_dir, 2, 1, 0);
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 5 - s->mv_dir, 3, 1, 0);
                    }
                } else {                                    // No coded block pattern
                    put_bits(&s->pb, 5 - s->mv_dir, 2);
                    if (!s->frame_pred_frame_dct)
                        put_bits(&s->pb, 2, 2);             /* motion_type: frame */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                if (s->mv_dir & MV_DIR_FORWARD) {
                    mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
                    mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
                    s->last_mv[0][0][0] = s->last_mv[0][1][0] = s->mv[0][0][0];
                    s->last_mv[0][0][1] = s->last_mv[0][1][1] = s->mv[0][0][1];
                    s->f_count++;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
                    mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
                    s->last_mv[1][0][0] = s->last_mv[1][1][0] = s->mv[1][0][0];
                    s->last_mv[1][0][1] = s->last_mv[1][1][1] = s->mv[1][0][1];
                    s->b_count++;
                }
            } else {
                assert(s->mv_type == MV_TYPE_FIELD);
                assert(!s->frame_pred_frame_dct);
                if (cbp) {                                  // With coded block pattern
                    if (s->dquant) {
                        if (s->mv_dir == MV_DIR_FORWARD)
                            put_mb_modes(s, 6, 3, 1, 1);
                        else
                            put_mb_modes(s, 8 - s->mv_dir, 2, 1, 1);
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 5 - s->mv_dir, 3, 1, 1);
                    }
                } else {                                    // No coded block pattern
                    put_bits(&s->pb, 5 - s->mv_dir, 2);
                    put_bits(&s->pb, 2, 1);                 /* motion_type: field */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                if (s->mv_dir & MV_DIR_FORWARD) {
                    for (i = 0; i < 2; i++) {
                        put_bits(&s->pb, 1, s->field_select[0][i]);
                        mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0], s->f_code);
                        mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1), s->f_code);
                        s->last_mv[0][i][0] = s->mv[0][i][0];
                        s->last_mv[0][i][1] = 2 * s->mv[0][i][1];
                    }
                    s->f_count++;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    for (i = 0; i < 2; i++) {
                        put_bits(&s->pb, 1, s->field_select[1][i]);
                        mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0], s->b_code);
                        mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1] >> 1), s->b_code);
                        s->last_mv[1][i][0] = s->mv[1][i][0];
                        s->last_mv[1][i][1] = 2 * s->mv[1][i][1];
                    }
                    s->b_count++;
                }
            }
            s->mv_bits += get_bits_diff(s);
            if (cbp) {
                if (s->chroma_y_shift) {
                    put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp][1], ff_mpeg12_mbPatTable[cbp][0]);
                } else {
                    put_bits(&s->pb, ff_mpeg12_mbPatTable[cbp >> 2][1], ff_mpeg12_mbPatTable[cbp >> 2][0]);
                    put_sbits(&s->pb, 2, cbp);
                }
            }
        }
        for (i = 0; i < mb_block_count; i++) {
            if (cbp & (1 << (mb_block_count - 1 - i))) {
                mpeg1_encode_block(s, block[i], i);
            }
        }
        s->mb_skip_run = 0;
        if (s->mb_intra)
            s->i_tex_bits += get_bits_diff(s);
        else
            s->p_tex_bits += get_bits_diff(s);
    }
}

void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
{
    if (s->chroma_format == CHROMA_420)
        mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6);
    else
        mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8);
}

// RAL: Parameter added: f_or_b_code
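/* Differential motion vectors use modulo coding: the value is wrapped into
 * the f_code range with sign_extend(), then written as a VLC-coded magnitude
 * class, a sign bit and (f_code - 1) residual bits. */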
static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
{
    if (val == 0) {
        /* zero vector */
        put_bits(&s->pb,
                 ff_mpeg12_mbMotionVectorTable[0][1],
                 ff_mpeg12_mbMotionVectorTable[0][0]);
    } else {
        int code, sign, bits;
        int bit_size = f_or_b_code - 1;
        int range    = 1 << bit_size;
        /* modulo encoding */
        val = sign_extend(val, 5 + bit_size);

        if (val >= 0) {
            val--;
            code = (val >> bit_size) + 1;
            bits = val & (range - 1);
            sign = 0;
        } else {
            val = -val;
            val--;
            code = (val >> bit_size) + 1;
            bits = val & (range - 1);
            sign = 1;
        }

        assert(code > 0 && code <= 16);

        put_bits(&s->pb,
                 ff_mpeg12_mbMotionVectorTable[code][1],
                 ff_mpeg12_mbMotionVectorTable[code][0]);

        put_bits(&s->pb, 1, sign);
        if (bit_size > 0) {
            put_bits(&s->pb, bit_size, bits);
        }
    }
}
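
/* One-time construction of the static encoder tables: the unified AC VLC
 * length tables, the unified DC code tables, the per-f_code motion vector
 * bit-length penalties (mv_penalty), and fcode_tab, which maps a motion
 * vector magnitude to the smallest f_code whose range covers it. */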
av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
{
    static int done = 0;

    ff_mpeg12_common_init(s);

    if (!done) {
        int f_code;
        int mv;
        int i;

        done = 1;
        ff_init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        ff_init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);

        for (i = 0; i < 64; i++) {
            mpeg1_max_level[0][i] = ff_rl_mpeg1.max_level[0][i];
            mpeg1_index_run[0][i] = ff_rl_mpeg1.index_run[0][i];
        }

        init_uni_ac_vlc(&ff_rl_mpeg1, uni_mpeg1_ac_vlc_len);
        if (s->intra_vlc_format)
            init_uni_ac_vlc(&ff_rl_mpeg2, uni_mpeg2_ac_vlc_len);

        /* build unified dc encoding tables */
        for (i = -255; i < 256; i++) {
            int adiff, index;
            int bits, code;
            int diff = i;

            adiff = FFABS(diff);
            if (diff < 0)
                diff--;
            index = av_log2(2 * adiff);

            bits = ff_mpeg12_vlc_dc_lum_bits[index] + index;
            code = (ff_mpeg12_vlc_dc_lum_code[index] << index) + (diff & ((1 << index) - 1));
            mpeg1_lum_dc_uni[i + 255] = bits + (code << 8);

            bits = ff_mpeg12_vlc_dc_chroma_bits[index] + index;
            code = (ff_mpeg12_vlc_dc_chroma_code[index] << index) + (diff & ((1 << index) - 1));
            mpeg1_chr_dc_uni[i + 255] = bits + (code << 8);
        }

        for (f_code = 1; f_code <= MAX_FCODE; f_code++) {
            for (mv = -MAX_MV; mv <= MAX_MV; mv++) {
                int len;

                if (mv == 0) {
                    len = ff_mpeg12_mbMotionVectorTable[0][1];
                } else {
                    int val, bit_size, code;

                    bit_size = f_code - 1;

                    val = mv;
                    if (val < 0)
                        val = -val;
                    val--;
                    code = (val >> bit_size) + 1;
                    if (code < 17) {
                        len = ff_mpeg12_mbMotionVectorTable[code][1] + 1 + bit_size;
                    } else {
                        len = ff_mpeg12_mbMotionVectorTable[16][1] + 2 + bit_size;
                    }
                }

                mv_penalty[f_code][mv + MAX_MV] = len;
            }
        }

        for (f_code = MAX_FCODE; f_code > 0; f_code--) {
            for (mv = -(8 << f_code); mv < (8 << f_code); mv++) {
                fcode_tab[mv + MAX_MV] = f_code;
            }
        }
    }
    s->me.mv_penalty = mv_penalty;
    s->fcode_tab     = fcode_tab;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        s->min_qcoeff = -255;
        s->max_qcoeff = 255;
    } else {
        s->min_qcoeff = -2047;
        s->max_qcoeff = 2047;
    }
    if (s->intra_vlc_format) {
        s->intra_ac_vlc_length      =
        s->intra_ac_vlc_last_length = uni_mpeg2_ac_vlc_len;
    } else {
        s->intra_ac_vlc_length      =
        s->intra_ac_vlc_last_length = uni_mpeg1_ac_vlc_len;
    }
    s->inter_ac_vlc_length      =
    s->inter_ac_vlc_last_length = uni_mpeg1_ac_vlc_len;
}
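
/* DC differences with |diff| <= 255 use the precomputed mpeg1_lum/chr_dc_uni
 * tables (bit count in the low byte, code in the upper bytes); larger
 * differences compute the size and code on the fly. */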
static inline void encode_dc(MpegEncContext *s, int diff, int component)
{
    if (((unsigned) (diff + 255)) >= 511) {
        int index;

        if (diff < 0) {
            index = av_log2_16bit(-2 * diff);
            diff--;
        } else {
            index = av_log2_16bit(2 * diff);
        }
        if (component == 0) {
            put_bits(&s->pb,
                     ff_mpeg12_vlc_dc_lum_bits[index] + index,
                     (ff_mpeg12_vlc_dc_lum_code[index] << index) + (diff & ((1 << index) - 1)));
        } else {
            put_bits(&s->pb,
                     ff_mpeg12_vlc_dc_chroma_bits[index] + index,
                     (ff_mpeg12_vlc_dc_chroma_code[index] << index) + (diff & ((1 << index) - 1)));
        }
    } else {
        if (component == 0) {
            put_bits(&s->pb,
                     mpeg1_lum_dc_uni[diff + 255] & 0xFF,
                     mpeg1_lum_dc_uni[diff + 255] >> 8);
        } else {
            put_bits(&s->pb,
                     mpeg1_chr_dc_uni[diff + 255] & 0xFF,
                     mpeg1_chr_dc_uni[diff + 255] >> 8);
        }
    }
}
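
/* Write one 8x8 block: for intra blocks the DC difference is coded first via
 * encode_dc(), then the remaining coefficients are written as (run, level)
 * VLC pairs in scan order, using the escape code for levels outside the
 * table range, followed by the end-of-block code. */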
static void mpeg1_encode_block(MpegEncContext *s,
                               int16_t *block,
                               int n)
{
    int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
    int code, component;
    const uint16_t (*table_vlc)[2] = ff_rl_mpeg1.table_vlc;

    last_index = s->block_last_index[n];

    /* DC coef */
    if (s->mb_intra) {
        component = (n <= 3 ? 0 : (n & 1) + 1);
        dc   = block[0];                            /* overflow is impossible */
        diff = dc - s->last_dc[component];
        encode_dc(s, diff, component);
        s->last_dc[component] = dc;
        i = 1;
        if (s->intra_vlc_format)
            table_vlc = ff_rl_mpeg2.table_vlc;
    } else {
        /* encode the first coefficient: needs to be done here because
         * it is handled slightly differently */
        level = block[0];
        if (abs(level) == 1) {
            code = ((uint32_t)level >> 31);         /* the sign bit */
            put_bits(&s->pb, 2, code | 0x02);
            i = 1;
        } else {
            i = 0;
            last_non_zero = -1;
            goto next_coef;
        }
    }

    /* now quantize & encode AC coefficients */
    last_non_zero = i - 1;
    for (; i <= last_index; i++) {
        j = s->intra_scantable.permutated[i];
        level = block[j];

next_coef:
        /* encode using VLC */
        if (level != 0) {
            run = i - last_non_zero - 1;

            alevel = level;
            MASK_ABS(sign, alevel);
            sign &= 1;

            if (alevel <= mpeg1_max_level[0][run]) {
                code = mpeg1_index_run[0][run] + alevel - 1;
                /* store the VLC & sign at once */
                put_bits(&s->pb, table_vlc[code][1] + 1, (table_vlc[code][0] << 1) + sign);
            } else {
                /* escape seems to be pretty rare <5% so I do not optimize it */
                put_bits(&s->pb, table_vlc[111][1], table_vlc[111][0]);
                /* escape: only clip in this case */
                put_bits(&s->pb, 6, run);
                if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                    if (alevel < 128) {
                        put_sbits(&s->pb, 8, level);
                    } else {
                        if (level < 0) {
                            put_bits(&s->pb, 16, 0x8001 + level + 255);
                        } else {
                            put_sbits(&s->pb, 16, level);
                        }
                    }
                } else {
                    put_sbits(&s->pb, 12, level);
                }
            }
            last_non_zero = i;
        }
    }
    /* end of block */
    put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]);
}

#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define COMMON_OPTS                                                                                                   \
    { "intra_vlc",           "Use MPEG-2 intra VLC table.",                   OFFSET(intra_vlc_format),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE }, \
    { "drop_frame_timecode", "Timecode is in drop frame format.",             OFFSET(drop_frame_timecode), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE }, \
    { "scan_offset",         "Reserve space for SVCD scan offset user data.", OFFSET(scan_offset),         AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },

static const AVOption mpeg1_options[] = {
    COMMON_OPTS
    FF_MPV_COMMON_OPTS
    { NULL },
};

static const AVOption mpeg2_options[] = {
    COMMON_OPTS
    { "non_linear_quant", "Use nonlinear quantizer.",    OFFSET(q_scale_type),   AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "alternate_scan",   "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};

#define mpeg12_class(x)                         \
static const AVClass mpeg ## x ## _class = {    \
    .class_name = "mpeg" # x "video encoder",   \
    .item_name  = av_default_item_name,         \
    .option     = mpeg ## x ## _options,        \
    .version    = LIBAVUTIL_VERSION_INT,        \
};

mpeg12_class(1)
mpeg12_class(2)

AVCodec ff_mpeg1video_encoder = {
    .name                 = "mpeg1video",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_MPEG1VIDEO,
    .priv_data_size       = sizeof(MpegEncContext),
    .init                 = encode_init,
    .encode2              = ff_MPV_encode_picture,
    .close                = ff_MPV_encode_end,
    .supported_framerates = ff_mpeg12_frame_rate_tab + 1,
    .pix_fmts             = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                           AV_PIX_FMT_NONE },
    .capabilities         = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
    .long_name            = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
    .priv_class           = &mpeg1_class,
};

AVCodec ff_mpeg2video_encoder = {
    .name                 = "mpeg2video",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_MPEG2VIDEO,
    .priv_data_size       = sizeof(MpegEncContext),
    .init                 = encode_init,
    .encode2              = ff_MPV_encode_picture,
    .close                = ff_MPV_encode_end,
    .supported_framerates = ff_mpeg12_frame_rate_tab + 1,
    .pix_fmts             = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE
    },
    .capabilities         = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
    .long_name            = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
    .priv_class           = &mpeg2_class,
};