You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1095 lines
41KB

  1. /*
  2. * MPEG1/2 encoder
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * MPEG1/2 encoder
  25. */
  26. #include "libavutil/attributes.h"
  27. #include "libavutil/avassert.h"
  28. #include "libavutil/log.h"
  29. #include "libavutil/opt.h"
  30. #include "libavutil/timecode.h"
  31. #include "avcodec.h"
  32. #include "bytestream.h"
  33. #include "mathops.h"
  34. #include "mpeg12.h"
  35. #include "mpeg12data.h"
  36. #include "mpegvideo.h"
/* Maps a linear qscale (1..12) to the value written in the bitstream when
 * the MPEG-2 non-linear quantizer (q_scale_type) is in use; index 0 unused
 * (put_qscale() asserts 1 <= qscale <= 12 before indexing). */
static const uint8_t inv_non_linear_qscale[] = {
    0, 2, 4, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16,
};

/* Byte pattern written as user data to reserve space for an SVCD scan
 * offset table (see the scan_offset option); presumably patched with real
 * offsets by a later authoring step — the encoder only reserves the room. */
static const uint8_t svcd_scan_offset_placeholder[] = {
    0x10, 0x0E, 0x00, 0x80, 0x81, 0x00, 0x80,
    0x81, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

/* Bit-cost of each motion-vector differential per f_code, built in
 * ff_mpeg1_encode_init() and handed to the motion estimator. */
static uint8_t mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
/* Smallest f_code able to represent each MV value (index mv + MAX_MV). */
static uint8_t fcode_tab[MAX_MV * 2 + 1];
/* Unified AC VLC bit-length tables, indexed via UNI_AC_ENC_INDEX(run, level);
 * one for the MPEG-1 tables, one for the MPEG-2 intra VLC variant. */
static uint8_t uni_mpeg1_ac_vlc_len[64 * 64 * 2];
static uint8_t uni_mpeg2_ac_vlc_len[64 * 64 * 2];

/* simple include everything table for dc, first byte is bits
 * number next 3 are code */
static uint32_t mpeg1_lum_dc_uni[512];
static uint32_t mpeg1_chr_dc_uni[512];

/* Local copies of ff_rl_mpeg1's index_run / max_level, filled once in
 * ff_mpeg1_encode_init() for fast access in mpeg1_encode_block(). */
static uint8_t mpeg1_index_run[2][64];
static int8_t mpeg1_max_level[2][64];
  54. static av_cold void init_uni_ac_vlc(RLTable *rl, uint8_t *uni_ac_vlc_len)
  55. {
  56. int i;
  57. for (i = 0; i < 128; i++) {
  58. int level = i - 64;
  59. int run;
  60. if (!level)
  61. continue;
  62. for (run = 0; run < 64; run++) {
  63. int len, code;
  64. int alevel = FFABS(level);
  65. if (alevel > rl->max_level[0][run])
  66. code = 111; /* rl->n */
  67. else
  68. code = rl->index_run[0][run] + alevel - 1;
  69. if (code < 111) { /* rl->n */
  70. /* length of VLC and sign */
  71. len = rl->table_vlc[code][1] + 1;
  72. } else {
  73. len = rl->table_vlc[111 /* rl->n */][1] + 6;
  74. if (alevel < 128)
  75. len += 8;
  76. else
  77. len += 16;
  78. }
  79. uni_ac_vlc_len[UNI_AC_ENC_INDEX(run, i)] = len;
  80. }
  81. }
  82. }
/* Find the MPEG frame_rate_index (and, for MPEG-2, the frame-rate extension
 * numerator/denominator) that exactly matches the configured time base.
 * Returns 0 on an exact match, -1 if only an approximation was found.
 * NOTE: the candidate selection below is order-sensitive — later exact ties
 * with a plain 1/1 extension deliberately overwrite earlier winners. */
static int find_frame_rate_index(MpegEncContext *s)
{
    int i;
    AVRational bestq = (AVRational) {0, 0};  /* best candidate; num==0 = none yet */
    AVRational ext;
    AVRational target = av_inv_q(s->avctx->time_base);

    for (i = 1; i < 14; i++) {
        /* table entries >= 9 are non-standard; only usable in unofficial mode */
        if (s->avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL &&
            i >= 9)
            break;

        for (ext.num=1; ext.num <= 4; ext.num++) {
            for (ext.den=1; ext.den <= 32; ext.den++) {
                AVRational q = av_mul_q(ext, ff_mpeg12_frame_rate_tab[i]);

                /* MPEG-1 has no frame-rate extension: only ext == 1/1 allowed */
                if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO && (ext.den!=1 || ext.num!=1))
                    continue;
                /* skip non-reduced fractions — they duplicate reduced ones */
                if (av_gcd(ext.den, ext.num) != 1)
                    continue;

                /* accept q if it is the first candidate, strictly nearer to
                 * the target, or equally near with the trivial extension */
                if ( bestq.num==0
                  || av_nearer_q(target, bestq, q) < 0
                  || ext.num==1 && ext.den==1 && av_nearer_q(target, bestq, q) == 0) {
                    bestq = q;
                    s->frame_rate_index = i;
                    s->mpeg2_frame_rate_ext.num = ext.num;
                    s->mpeg2_frame_rate_ext.den = ext.den;
                }
            }
        }
    }

    if (av_cmp_q(target, bestq))
        return -1;
    else
        return 0;
}
  116. static av_cold int encode_init(AVCodecContext *avctx)
  117. {
  118. MpegEncContext *s = avctx->priv_data;
  119. if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && avctx->height > 2800)
  120. avctx->thread_count = 1;
  121. if (ff_MPV_encode_init(avctx) < 0)
  122. return -1;
  123. if (find_frame_rate_index(s) < 0) {
  124. if (s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
  125. av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n",
  126. avctx->time_base.den, avctx->time_base.num);
  127. return -1;
  128. } else {
  129. av_log(avctx, AV_LOG_INFO,
  130. "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n",
  131. avctx->time_base.den, avctx->time_base.num);
  132. }
  133. }
  134. if (avctx->profile == FF_PROFILE_UNKNOWN) {
  135. if (avctx->level != FF_LEVEL_UNKNOWN) {
  136. av_log(avctx, AV_LOG_ERROR, "Set profile and level\n");
  137. return -1;
  138. }
  139. /* Main or 4:2:2 */
  140. avctx->profile = s->chroma_format == CHROMA_420 ? 4 : 0;
  141. }
  142. if (avctx->level == FF_LEVEL_UNKNOWN) {
  143. if (avctx->profile == 0) { /* 4:2:2 */
  144. if (avctx->width <= 720 && avctx->height <= 608)
  145. avctx->level = 5; /* Main */
  146. else
  147. avctx->level = 2; /* High */
  148. } else {
  149. if (avctx->profile != 1 && s->chroma_format != CHROMA_420) {
  150. av_log(avctx, AV_LOG_ERROR,
  151. "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n");
  152. return -1;
  153. }
  154. if (avctx->width <= 720 && avctx->height <= 576)
  155. avctx->level = 8; /* Main */
  156. else if (avctx->width <= 1440)
  157. avctx->level = 6; /* High 1440 */
  158. else
  159. avctx->level = 4; /* High */
  160. }
  161. }
  162. if ((avctx->width & 0xFFF) == 0 && (avctx->height & 0xFFF) == 1) {
  163. av_log(avctx, AV_LOG_ERROR, "Width / Height is invalid for MPEG2\n");
  164. return AVERROR(EINVAL);
  165. }
  166. if (s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
  167. if ((avctx->width & 0xFFF) == 0 || (avctx->height & 0xFFF) == 0) {
  168. av_log(avctx, AV_LOG_ERROR, "Width or Height are not allowed to be multiplies of 4096\n"
  169. "add '-strict %d' if you want to use them anyway.\n", FF_COMPLIANCE_UNOFFICIAL);
  170. return AVERROR(EINVAL);
  171. }
  172. }
  173. s->drop_frame_timecode = s->drop_frame_timecode || !!(avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE);
  174. if (s->drop_frame_timecode)
  175. s->tc.flags |= AV_TIMECODE_FLAG_DROPFRAME;
  176. if (s->drop_frame_timecode && s->frame_rate_index != 4) {
  177. av_log(avctx, AV_LOG_ERROR,
  178. "Drop frame time code only allowed with 1001/30000 fps\n");
  179. return -1;
  180. }
  181. if (s->tc_opt_str) {
  182. AVRational rate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];
  183. int ret = av_timecode_init_from_string(&s->tc, rate, s->tc_opt_str, s);
  184. if (ret < 0)
  185. return ret;
  186. s->drop_frame_timecode = !!(s->tc.flags & AV_TIMECODE_FLAG_DROPFRAME);
  187. s->avctx->timecode_frame_start = s->tc.start;
  188. } else {
  189. s->avctx->timecode_frame_start = 0; // default is -1
  190. }
  191. return 0;
  192. }
  193. static void put_header(MpegEncContext *s, int header)
  194. {
  195. avpriv_align_put_bits(&s->pb);
  196. put_bits(&s->pb, 16, header >> 16);
  197. put_sbits(&s->pb, 16, header);
  198. }
/* put sequence header if needed */
/* Writes sequence header, MPEG-2 sequence extension, and GOP header.
 * Only emitted on key frames (headers are repeated at every GOP). */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
    unsigned int vbv_buffer_size, fps, v;
    int i, constraint_parameter_flag;
    uint64_t time_code;
    float best_aspect_error = 1E10;
    float aspect_ratio = av_q2d(s->avctx->sample_aspect_ratio);

    if (aspect_ratio == 0.0)
        aspect_ratio = 1.0;             // pixel aspect 1:1 (VGA)

    if (s->current_picture.f.key_frame) {
        AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];

        /* mpeg1 header repeated every gop */
        put_header(s, SEQ_START_CODE);

        /* 12-bit size fields; low 12 bits only, the rest goes in the
         * MPEG-2 size extension written below */
        put_sbits(&s->pb, 12, s->width & 0xFFF);
        put_sbits(&s->pb, 12, s->height & 0xFFF);

        /* pick the aspect_ratio_info with the smallest error versus the
         * configured sample aspect ratio (MPEG-1 tables code pixel aspect,
         * MPEG-2 tables code display aspect) */
        for (i = 1; i < 15; i++) {
            float error = aspect_ratio;
            if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1)
                error -= 1.0 / ff_mpeg1_aspect[i];
            else
                error -= av_q2d(ff_mpeg2_aspect[i]) * s->height / s->width;

            error = FFABS(error);

            if (error < best_aspect_error) {
                best_aspect_error = error;
                s->aspect_ratio_info = i;
            }
        }

        put_bits(&s->pb, 4, s->aspect_ratio_info);
        put_bits(&s->pb, 4, s->frame_rate_index);

        /* bit_rate in 400 bit/s units; 0x3FFFF is the MPEG-1 field maximum
         * and also the "variable bitrate" marker when no max rate is set */
        if (s->avctx->rc_max_rate) {
            v = (s->avctx->rc_max_rate + 399) / 400;
            if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
                v = 0x3ffff;
        } else {
            v = 0x3FFFF;
        }

        if (s->avctx->rc_buffer_size)
            vbv_buffer_size = s->avctx->rc_buffer_size;
        else
            /* VBV calculation: Scaled so that a VCD has the proper
             * VBV size of 40 kilobytes */
            vbv_buffer_size = ((20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
        /* field is in 16-kbit units, rounded up */
        vbv_buffer_size = (vbv_buffer_size + 16383) / 16384;

        put_sbits(&s->pb, 18, v);
        put_bits(&s->pb, 1, 1);         // marker
        put_sbits(&s->pb, 10, vbv_buffer_size);

        /* constrained_parameters_flag: stream stays within the MPEG-1
         * constrained-parameter limits (size, MB rate, fps, MV range,
         * VBV size, bitrate) */
        constraint_parameter_flag =
            s->width <= 768 &&
            s->height <= 576 &&
            s->mb_width * s->mb_height <= 396 &&
            s->mb_width * s->mb_height * framerate.num <= 396 * 25 * framerate.den &&
            framerate.num <= framerate.den * 30 &&
            s->avctx->me_range &&
            s->avctx->me_range < 128 &&
            vbv_buffer_size <= 20 &&
            v <= 1856000 / 400 &&
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO;

        put_bits(&s->pb, 1, constraint_parameter_flag);

        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

        if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* sequence extension: the fields MPEG-1 has no room for */
            put_header(s, EXT_START_CODE);
            put_bits(&s->pb, 4, 1);                     // seq ext
            put_bits(&s->pb, 1, s->avctx->profile == 0); // escx 1 for 4:2:2 profile
            put_bits(&s->pb, 3, s->avctx->profile);     // profile
            put_bits(&s->pb, 4, s->avctx->level);       // level
            put_bits(&s->pb, 1, s->progressive_sequence);
            put_bits(&s->pb, 2, s->chroma_format);
            put_bits(&s->pb, 2, s->width >> 12);        // horizontal size ext
            put_bits(&s->pb, 2, s->height >> 12);       // vertical size ext
            put_bits(&s->pb, 12, v >> 18);              // bitrate ext
            put_bits(&s->pb, 1, 1);                     // marker
            put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext
            put_bits(&s->pb, 1, s->low_delay);
            put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n
            put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d
        }

        put_header(s, GOP_START_CODE);
        put_bits(&s->pb, 1, s->drop_frame_timecode);    // drop frame flag
        /* time code: we must convert from the real frame rate to a
         * fake MPEG frame rate in case of low frame rate */
        fps = (framerate.num + framerate.den / 2) / framerate.den;
        time_code = s->current_picture_ptr->f.coded_picture_number +
                    s->avctx->timecode_frame_start;

        s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
        av_assert0(s->drop_frame_timecode == !!(s->tc.flags & AV_TIMECODE_FLAG_DROPFRAME));
        if (s->drop_frame_timecode)
            time_code = av_timecode_adjust_ntsc_framenum2(time_code, fps);

        put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24)); // hours
        put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60));   // minutes
        put_bits(&s->pb, 1, 1);                                           // marker
        put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60));          // seconds
        put_bits(&s->pb, 6, (uint32_t)((time_code % fps)));               // frames
        put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP));        // closed_gop
        put_bits(&s->pb, 1, 0);                     // broken link
    }
}
  297. static inline void encode_mb_skip_run(MpegEncContext *s, int run)
  298. {
  299. while (run >= 33) {
  300. put_bits(&s->pb, 11, 0x008);
  301. run -= 33;
  302. }
  303. put_bits(&s->pb, ff_mpeg12_mbAddrIncrTable[run][1],
  304. ff_mpeg12_mbAddrIncrTable[run][0]);
  305. }
  306. static av_always_inline void put_qscale(MpegEncContext *s)
  307. {
  308. if (s->q_scale_type) {
  309. av_assert2(s->qscale >= 1 && s->qscale <= 12);
  310. put_bits(&s->pb, 5, inv_non_linear_qscale[s->qscale]);
  311. } else {
  312. put_bits(&s->pb, 5, s->qscale);
  313. }
  314. }
  315. void ff_mpeg1_encode_slice_header(MpegEncContext *s)
  316. {
  317. if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->height > 2800) {
  318. put_header(s, SLICE_MIN_START_CODE + (s->mb_y & 127));
  319. /* slice_vertical_position_extension */
  320. put_bits(&s->pb, 3, s->mb_y >> 7);
  321. } else {
  322. put_header(s, SLICE_MIN_START_CODE + s->mb_y);
  323. }
  324. put_qscale(s);
  325. /* slice extra information */
  326. put_bits(&s->pb, 1, 0);
  327. }
/* Write the picture header (preceded by sequence/GOP headers when due),
 * the MPEG-2 picture coding extension, optional SVCD user data, and the
 * first slice header. */
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
{
    mpeg1_encode_sequence_header(s);

    /* mpeg1 picture header */
    put_header(s, PICTURE_START_CODE);
    /* temporal reference */

    // RAL: s->picture_number instead of s->fake_picture_number
    put_bits(&s->pb, 10,
             (s->picture_number - s->gop_picture_number) & 0x3ff);
    put_bits(&s->pb, 3, s->pict_type);

    /* remember where vbv_delay lives so rate control can patch it once the
     * final picture size is known; 0xFFFF is the "unspecified" placeholder */
    s->vbv_delay_ptr = s->pb.buf + put_bits_count(&s->pb) / 8;
    put_bits(&s->pb, 16, 0xFFFF);               /* vbv_delay */

    // RAL: Forward f_code also needed for B-frames
    if (s->pict_type == AV_PICTURE_TYPE_P ||
        s->pict_type == AV_PICTURE_TYPE_B) {
        put_bits(&s->pb, 1, 0);                 /* half pel coordinates */
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
            put_bits(&s->pb, 3, s->f_code);     /* forward_f_code */
        else
            /* MPEG-2 ignores these legacy fields; 7 means "unused" */
            put_bits(&s->pb, 3, 7);             /* forward_f_code */
    }

    // RAL: Backward f_code necessary for B-frames
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        put_bits(&s->pb, 1, 0);                 /* half pel coordinates */
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
            put_bits(&s->pb, 3, s->b_code);     /* backward_f_code */
        else
            put_bits(&s->pb, 3, 7);             /* backward_f_code */
    }

    put_bits(&s->pb, 1, 0);                     /* extra bit picture */

    s->frame_pred_frame_dct = 1;
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* picture coding extension */
        put_header(s, EXT_START_CODE);
        put_bits(&s->pb, 4, 8);                 /* pic ext */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->pict_type == AV_PICTURE_TYPE_B) {
            put_bits(&s->pb, 4, s->f_code);
            put_bits(&s->pb, 4, s->f_code);
        } else {
            put_bits(&s->pb, 8, 255);           /* f_codes unused: all ones */
        }
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            put_bits(&s->pb, 4, s->b_code);
            put_bits(&s->pb, 4, s->b_code);
        } else {
            put_bits(&s->pb, 8, 255);           /* b_codes unused: all ones */
        }
        put_bits(&s->pb, 2, s->intra_dc_precision);

        av_assert0(s->picture_structure == PICT_FRAME);
        put_bits(&s->pb, 2, s->picture_structure);
        if (s->progressive_sequence)
            put_bits(&s->pb, 1, 0);             /* no repeat */
        else
            put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
        /* XXX: optimize the generation of this flag with entropy measures */
        s->frame_pred_frame_dct = s->progressive_sequence;

        put_bits(&s->pb, 1, s->frame_pred_frame_dct);
        put_bits(&s->pb, 1, s->concealment_motion_vectors);
        put_bits(&s->pb, 1, s->q_scale_type);
        put_bits(&s->pb, 1, s->intra_vlc_format);
        put_bits(&s->pb, 1, s->alternate_scan);
        put_bits(&s->pb, 1, s->repeat_first_field);
        s->progressive_frame = s->progressive_sequence;
        /* chroma_420_type */
        put_bits(&s->pb, 1, s->chroma_format ==
                            CHROMA_420 ? s->progressive_frame : 0);
        put_bits(&s->pb, 1, s->progressive_frame);
        put_bits(&s->pb, 1, 0);                 /* composite_display_flag */
    }
    if (s->scan_offset) {
        int i;

        /* reserve room for an SVCD scan offset table as user data */
        put_header(s, USER_START_CODE);
        for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++)
            put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
    }

    s->mb_y = 0;
    ff_mpeg1_encode_slice_header(s);
}
  406. static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
  407. int has_mv, int field_motion)
  408. {
  409. put_bits(&s->pb, n, bits);
  410. if (!s->frame_pred_frame_dct) {
  411. if (has_mv)
  412. /* motion_type: frame/field */
  413. put_bits(&s->pb, 2, 2 - field_motion);
  414. put_bits(&s->pb, 1, s->interlaced_dct);
  415. }
  416. }
  417. // RAL: Parameter added: f_or_b_code
  418. static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
  419. {
  420. if (val == 0) {
  421. /* zero vector */
  422. put_bits(&s->pb,
  423. ff_mpeg12_mbMotionVectorTable[0][1],
  424. ff_mpeg12_mbMotionVectorTable[0][0]);
  425. } else {
  426. int code, sign, bits;
  427. int bit_size = f_or_b_code - 1;
  428. int range = 1 << bit_size;
  429. /* modulo encoding */
  430. val = sign_extend(val, 5 + bit_size);
  431. if (val >= 0) {
  432. val--;
  433. code = (val >> bit_size) + 1;
  434. bits = val & (range - 1);
  435. sign = 0;
  436. } else {
  437. val = -val;
  438. val--;
  439. code = (val >> bit_size) + 1;
  440. bits = val & (range - 1);
  441. sign = 1;
  442. }
  443. av_assert2(code > 0 && code <= 16);
  444. put_bits(&s->pb,
  445. ff_mpeg12_mbMotionVectorTable[code][1],
  446. ff_mpeg12_mbMotionVectorTable[code][0]);
  447. put_bits(&s->pb, 1, sign);
  448. if (bit_size > 0)
  449. put_bits(&s->pb, bit_size, bits);
  450. }
  451. }
/* Write the VLC-coded DC differential for one block.
 * component: 0 selects the luma table, non-zero the chroma table. */
static inline void encode_dc(MpegEncContext *s, int diff, int component)
{
    if (((unsigned) (diff + 255)) >= 511) {
        /* |diff| > 255: outside the precomputed tables, code directly */
        int index;

        if (diff < 0) {
            index = av_log2_16bit(-2 * diff);
            /* negative residuals are stored off by one so the low bits
             * carry the magnitude pattern the decoder expects */
            diff--;
        } else {
            index = av_log2_16bit(2 * diff);
        }
        if (component == 0)
            put_bits(&s->pb,
                     ff_mpeg12_vlc_dc_lum_bits[index] + index,
                     (ff_mpeg12_vlc_dc_lum_code[index] << index) +
                     (diff & ((1 << index) - 1)));
        else
            put_bits(&s->pb,
                     ff_mpeg12_vlc_dc_chroma_bits[index] + index,
                     (ff_mpeg12_vlc_dc_chroma_code[index] << index) +
                     (diff & ((1 << index) - 1)));
    } else {
        /* common case: unified table lookup — low byte holds the bit
         * count, upper bytes the code (built in ff_mpeg1_encode_init()) */
        if (component == 0)
            put_bits(&s->pb,
                     mpeg1_lum_dc_uni[diff + 255] & 0xFF,
                     mpeg1_lum_dc_uni[diff + 255] >> 8);
        else
            put_bits(&s->pb,
                     mpeg1_chr_dc_uni[diff + 255] & 0xFF,
                     mpeg1_chr_dc_uni[diff + 255] >> 8);
    }
}
/* Encode one 8x8 block of quantized coefficients: DC (intra) or special
 * first-coefficient handling (inter), run/level AC VLCs with escape coding,
 * and the end-of-block code.  n is the block index within the macroblock.
 * Indices 111 and 112 into table_vlc are the escape and EOB entries
 * (ff_rl_mpeg1.n and n + 1). */
static void mpeg1_encode_block(MpegEncContext *s, int16_t *block, int n)
{
    int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
    int code, component;
    const uint16_t (*table_vlc)[2] = ff_rl_mpeg1.table_vlc;

    last_index = s->block_last_index[n];

    /* DC coef */
    if (s->mb_intra) {
        /* blocks 0..3 are luma; chroma alternates Cb/Cr by parity */
        component = (n <= 3 ? 0 : (n & 1) + 1);
        dc = block[0];                  /* overflow is impossible */
        diff = dc - s->last_dc[component];
        encode_dc(s, diff, component);
        s->last_dc[component] = dc;
        i = 1;
        if (s->intra_vlc_format)
            table_vlc = ff_rl_mpeg2.table_vlc;
    } else {
        /* encode the first coefficient: needs to be done here because
         * it is handled slightly differently */
        level = block[0];
        if (abs(level) == 1) {
            code = ((uint32_t)level >> 31);  /* the sign bit */
            put_bits(&s->pb, 2, code | 0x02);
            i = 1;
        } else {
            i = 0;
            last_non_zero = -1;
            /* first coefficient falls through to the generic coder */
            goto next_coef;
        }
    }

    /* now quantify & encode AC coefs */
    last_non_zero = i - 1;
    for (; i <= last_index; i++) {
        j = s->intra_scantable.permutated[i];
        level = block[j];

next_coef:
        /* encode using VLC */
        if (level != 0) {
            run = i - last_non_zero - 1;

            alevel = level;
            MASK_ABS(sign, alevel);     /* branch-free abs + sign extraction */
            sign &= 1;

            if (alevel <= mpeg1_max_level[0][run]) {
                code = mpeg1_index_run[0][run] + alevel - 1;
                /* store the VLC & sign at once */
                put_bits(&s->pb, table_vlc[code][1] + 1,
                         (table_vlc[code][0] << 1) + sign);
            } else {
                /* escape seems to be pretty rare <5% so I do not optimize it */
                put_bits(&s->pb, table_vlc[111][1], table_vlc[111][0]);
                /* escape: only clip in this case */
                put_bits(&s->pb, 6, run);
                if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                    if (alevel < 128) {
                        /* short escape: 8-bit signed level */
                        put_sbits(&s->pb, 8, level);
                    } else {
                        /* long escape: marker byte then 16-bit level */
                        if (level < 0)
                            put_bits(&s->pb, 16, 0x8001 + level + 255);
                        else
                            put_sbits(&s->pb, 16, level);
                    }
                } else {
                    /* MPEG-2: single 12-bit signed level */
                    put_sbits(&s->pb, 12, level);
                }
            }
            last_non_zero = i;
        }
    }
    /* end of block */
    put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]);
}
/* Encode one macroblock: skip-run bookkeeping, macroblock modes, motion
 * vectors, coded block pattern, then the coefficient blocks themselves.
 * Always-inlined so mb_block_count (6 for 4:2:0, 8 for 4:2:2) becomes a
 * compile-time constant in each caller. */
static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
                                                      int16_t block[6][64],
                                                      int motion_x, int motion_y,
                                                      int mb_block_count)
{
    int i, cbp;
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    const int first_mb = mb_x == s->resync_mb_x && mb_y == s->resync_mb_y;

    /* compute cbp */
    cbp = 0;
    for (i = 0; i < mb_block_count; i++)
        if (s->block_last_index[i] >= 0)
            cbp |= 1 << (mb_block_count - 1 - i);

    /* A MB can be skipped when it has no coefficients, is not the first of
     * a slice, is not the last of the row/picture (MPEG-1 only allows the
     * latter), and its motion matches skip semantics: zero MV for P, or
     * unchanged direction and MVs for B. */
    if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
        (mb_x != s->mb_width - 1 ||
         (mb_y != s->end_mb_y - 1 && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)) &&
        ((s->pict_type == AV_PICTURE_TYPE_P && (motion_x | motion_y) == 0) ||
         (s->pict_type == AV_PICTURE_TYPE_B && s->mv_dir == s->last_mv_dir &&
          (((s->mv_dir & MV_DIR_FORWARD)
            ? ((s->mv[0][0][0] - s->last_mv[0][0][0]) |
               (s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
           ((s->mv_dir & MV_DIR_BACKWARD)
            ? ((s->mv[1][0][0] - s->last_mv[1][0][0]) |
               (s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
        s->mb_skip_run++;
        s->qscale -= s->dquant;         /* nothing coded: undo the dquant */
        s->skip_count++;
        s->misc_bits++;
        s->last_bits++;
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            /* skipped P-MBs reset the MV predictor */
            s->last_mv[0][0][0] =
            s->last_mv[0][0][1] =
            s->last_mv[0][1][0] =
            s->last_mv[0][1][1] = 0;
        }
    } else {
        if (first_mb) {
            av_assert0(s->mb_skip_run == 0);
            /* address of the first MB in a slice is coded as an increment
             * from the slice start */
            encode_mb_skip_run(s, s->mb_x);
        } else {
            encode_mb_skip_run(s, s->mb_skip_run);
        }

        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if (s->dquant && cbp) {
                /* macroblock_type: macroblock_quant = 1 */
                put_mb_modes(s, 2, 1, 0, 0);
                put_qscale(s);
            } else {
                /* macroblock_type: macroblock_quant = 0 */
                put_mb_modes(s, 1, 1, 0, 0);
                s->qscale -= s->dquant;
            }
            s->misc_bits += get_bits_diff(s);
            s->i_count++;
        } else if (s->mb_intra) {
            /* intra MB inside a P/B picture */
            if (s->dquant && cbp) {
                put_mb_modes(s, 6, 0x01, 0, 0);
                put_qscale(s);
            } else {
                put_mb_modes(s, 5, 0x03, 0, 0);
                s->qscale -= s->dquant;
            }
            s->misc_bits += get_bits_diff(s);
            s->i_count++;
            /* intra MBs reset all MV predictors */
            memset(s->last_mv, 0, sizeof(s->last_mv));
        } else if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (s->mv_type == MV_TYPE_16X16) {
                if (cbp != 0) {
                    if ((motion_x | motion_y) == 0) {
                        if (s->dquant) {
                            /* macroblock_pattern & quant */
                            put_mb_modes(s, 5, 1, 0, 0);
                            put_qscale(s);
                        } else {
                            /* macroblock_pattern only */
                            put_mb_modes(s, 2, 1, 0, 0);
                        }
                        s->misc_bits += get_bits_diff(s);
                    } else {
                        if (s->dquant) {
                            put_mb_modes(s, 5, 2, 1, 0);    /* motion + cbp */
                            put_qscale(s);
                        } else {
                            put_mb_modes(s, 1, 1, 1, 0);    /* motion + cbp */
                        }
                        s->misc_bits += get_bits_diff(s);
                        // RAL: f_code parameter added
                        mpeg1_encode_motion(s,
                                            motion_x - s->last_mv[0][0][0],
                                            s->f_code);
                        // RAL: f_code parameter added
                        mpeg1_encode_motion(s,
                                            motion_y - s->last_mv[0][0][1],
                                            s->f_code);
                        s->mv_bits += get_bits_diff(s);
                    }
                } else {
                    put_bits(&s->pb, 3, 1);         /* motion only */
                    if (!s->frame_pred_frame_dct)
                        put_bits(&s->pb, 2, 2);     /* motion_type: frame */
                    s->misc_bits += get_bits_diff(s);
                    // RAL: f_code parameter added
                    mpeg1_encode_motion(s,
                                        motion_x - s->last_mv[0][0][0],
                                        s->f_code);
                    // RAL: f_code parameter added
                    mpeg1_encode_motion(s,
                                        motion_y - s->last_mv[0][0][1],
                                        s->f_code);
                    s->qscale -= s->dquant;         /* no cbp: dquant unused */
                    s->mv_bits += get_bits_diff(s);
                }
                /* both field predictors track the frame MV */
                s->last_mv[0][1][0] = s->last_mv[0][0][0] = motion_x;
                s->last_mv[0][1][1] = s->last_mv[0][0][1] = motion_y;
            } else {
                /* field motion (MPEG-2 interlaced path) */
                av_assert2(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD);
                if (cbp) {
                    if (s->dquant) {
                        put_mb_modes(s, 5, 2, 1, 1);        /* motion + cbp */
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 1, 1, 1, 1);        /* motion + cbp */
                    }
                } else {
                    put_bits(&s->pb, 3, 1);                 /* motion only */
                    put_bits(&s->pb, 2, 1);                 /* motion_type: field */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                for (i = 0; i < 2; i++) {                   /* top, bottom field */
                    put_bits(&s->pb, 1, s->field_select[0][i]);
                    mpeg1_encode_motion(s,
                                        s->mv[0][i][0] - s->last_mv[0][i][0],
                                        s->f_code);
                    /* vertical field MVs are predicted at half resolution */
                    mpeg1_encode_motion(s,
                                        s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1),
                                        s->f_code);
                    s->last_mv[0][i][0] = s->mv[0][i][0];
                    s->last_mv[0][i][1] = 2 * s->mv[0][i][1];
                }
                s->mv_bits += get_bits_diff(s);
            }
            if (cbp) {
                if (s->chroma_y_shift) {
                    /* 4:2:0: the VLC covers all 6 blocks */
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp][1],
                             ff_mpeg12_mbPatTable[cbp][0]);
                } else {
                    /* 4:2:2: VLC for the top 6 bits, 2 extra chroma bits */
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp >> 2][1],
                             ff_mpeg12_mbPatTable[cbp >> 2][0]);
                    put_sbits(&s->pb, 2, cbp);
                }
            }
            s->f_count++;
        } else {
            /* B picture */
            if (s->mv_type == MV_TYPE_16X16) {
                if (cbp) {                          // With coded bloc pattern
                    if (s->dquant) {
                        if (s->mv_dir == MV_DIR_FORWARD)
                            put_mb_modes(s, 6, 3, 1, 0);
                        else
                            put_mb_modes(s, 8 - s->mv_dir, 2, 1, 0);
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 5 - s->mv_dir, 3, 1, 0);
                    }
                } else {                            // No coded bloc pattern
                    put_bits(&s->pb, 5 - s->mv_dir, 2);
                    if (!s->frame_pred_frame_dct)
                        put_bits(&s->pb, 2, 2);     /* motion_type: frame */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                if (s->mv_dir & MV_DIR_FORWARD) {
                    mpeg1_encode_motion(s,
                                        s->mv[0][0][0] - s->last_mv[0][0][0],
                                        s->f_code);
                    mpeg1_encode_motion(s,
                                        s->mv[0][0][1] - s->last_mv[0][0][1],
                                        s->f_code);
                    s->last_mv[0][0][0] =
                    s->last_mv[0][1][0] = s->mv[0][0][0];
                    s->last_mv[0][0][1] =
                    s->last_mv[0][1][1] = s->mv[0][0][1];
                    s->f_count++;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    mpeg1_encode_motion(s,
                                        s->mv[1][0][0] - s->last_mv[1][0][0],
                                        s->b_code);
                    mpeg1_encode_motion(s,
                                        s->mv[1][0][1] - s->last_mv[1][0][1],
                                        s->b_code);
                    s->last_mv[1][0][0] =
                    s->last_mv[1][1][0] = s->mv[1][0][0];
                    s->last_mv[1][0][1] =
                    s->last_mv[1][1][1] = s->mv[1][0][1];
                    s->b_count++;
                }
            } else {
                /* field motion B-MB (MPEG-2 interlaced path) */
                av_assert2(s->mv_type == MV_TYPE_FIELD);
                av_assert2(!s->frame_pred_frame_dct);
                if (cbp) {                          // With coded bloc pattern
                    if (s->dquant) {
                        if (s->mv_dir == MV_DIR_FORWARD)
                            put_mb_modes(s, 6, 3, 1, 1);
                        else
                            put_mb_modes(s, 8 - s->mv_dir, 2, 1, 1);
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 5 - s->mv_dir, 3, 1, 1);
                    }
                } else {                            // No coded bloc pattern
                    put_bits(&s->pb, 5 - s->mv_dir, 2);
                    put_bits(&s->pb, 2, 1);         /* motion_type: field */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                if (s->mv_dir & MV_DIR_FORWARD) {
                    for (i = 0; i < 2; i++) {       /* top, bottom field */
                        put_bits(&s->pb, 1, s->field_select[0][i]);
                        mpeg1_encode_motion(s,
                                            s->mv[0][i][0] - s->last_mv[0][i][0],
                                            s->f_code);
                        /* vertical field MVs are predicted at half resolution */
                        mpeg1_encode_motion(s,
                                            s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1),
                                            s->f_code);
                        s->last_mv[0][i][0] = s->mv[0][i][0];
                        s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
                    }
                    s->f_count++;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    for (i = 0; i < 2; i++) {
                        put_bits(&s->pb, 1, s->field_select[1][i]);
                        mpeg1_encode_motion(s,
                                            s->mv[1][i][0] - s->last_mv[1][i][0],
                                            s->b_code);
                        mpeg1_encode_motion(s,
                                            s->mv[1][i][1] - (s->last_mv[1][i][1] >> 1),
                                            s->b_code);
                        s->last_mv[1][i][0] = s->mv[1][i][0];
                        s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
                    }
                    s->b_count++;
                }
            }
            s->mv_bits += get_bits_diff(s);
            if (cbp) {
                if (s->chroma_y_shift) {
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp][1],
                             ff_mpeg12_mbPatTable[cbp][0]);
                } else {
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp >> 2][1],
                             ff_mpeg12_mbPatTable[cbp >> 2][0]);
                    put_sbits(&s->pb, 2, cbp);
                }
            }
        }
        /* write the coefficient blocks flagged in the cbp */
        for (i = 0; i < mb_block_count; i++)
            if (cbp & (1 << (mb_block_count - 1 - i)))
                mpeg1_encode_block(s, block[i], i);
        s->mb_skip_run = 0;
        if (s->mb_intra)
            s->i_tex_bits += get_bits_diff(s);
        else
            s->p_tex_bits += get_bits_diff(s);
    }
}
  827. void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[6][64],
  828. int motion_x, int motion_y)
  829. {
  830. if (s->chroma_format == CHROMA_420)
  831. mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6);
  832. else
  833. mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8);
  834. }
/* Per-context encoder setup plus one-time construction of the static
 * VLC-length, DC, MV-penalty and f_code tables. */
av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
{
    static int done = 0;        /* guards the one-time static table init */

    ff_mpeg12_common_init(s);

    if (!done) {
        int f_code;
        int mv;
        int i;

        done = 1;
        ff_init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        ff_init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);

        /* cache run/level limits for the fast path in mpeg1_encode_block() */
        for (i = 0; i < 64; i++) {
            mpeg1_max_level[0][i] = ff_rl_mpeg1.max_level[0][i];
            mpeg1_index_run[0][i] = ff_rl_mpeg1.index_run[0][i];
        }

        init_uni_ac_vlc(&ff_rl_mpeg1, uni_mpeg1_ac_vlc_len);
        /* NOTE(review): the MPEG-2 table is only built when the first
         * context to run this has intra_vlc_format set */
        if (s->intra_vlc_format)
            init_uni_ac_vlc(&ff_rl_mpeg2, uni_mpeg2_ac_vlc_len);

        /* build unified dc encoding tables */
        for (i = -255; i < 256; i++) {
            int adiff, index;
            int bits, code;
            int diff = i;

            adiff = FFABS(diff);
            if (diff < 0)
                diff--;         /* negative values are coded off by one */
            index = av_log2(2 * adiff);

            /* entry layout: bit count in the low byte, code above it */
            bits = ff_mpeg12_vlc_dc_lum_bits[index] + index;
            code = (ff_mpeg12_vlc_dc_lum_code[index] << index) +
                   (diff & ((1 << index) - 1));
            mpeg1_lum_dc_uni[i + 255] = bits + (code << 8);

            bits = ff_mpeg12_vlc_dc_chroma_bits[index] + index;
            code = (ff_mpeg12_vlc_dc_chroma_code[index] << index) +
                   (diff & ((1 << index) - 1));
            mpeg1_chr_dc_uni[i + 255] = bits + (code << 8);
        }

        /* bit cost of every MV differential per f_code, for motion estimation */
        for (f_code = 1; f_code <= MAX_FCODE; f_code++)
            for (mv = -MAX_MV; mv <= MAX_MV; mv++) {
                int len;

                if (mv == 0) {
                    len = ff_mpeg12_mbMotionVectorTable[0][1];
                } else {
                    int val, bit_size, code;

                    bit_size = f_code - 1;

                    val = mv;
                    if (val < 0)
                        val = -val;
                    val--;
                    code = (val >> bit_size) + 1;
                    if (code < 17)
                        len = ff_mpeg12_mbMotionVectorTable[code][1] +
                              1 + bit_size;
                    else
                        /* out of range for this f_code: estimated cost */
                        len = ff_mpeg12_mbMotionVectorTable[16][1] +
                              2 + bit_size;
                }

                mv_penalty[f_code][mv + MAX_MV] = len;
            }

        /* smallest f_code that covers each MV: iterate from the largest so
         * smaller f_codes overwrite where they suffice */
        for (f_code = MAX_FCODE; f_code > 0; f_code--)
            for (mv = -(8 << f_code); mv < (8 << f_code); mv++)
                fcode_tab[mv + MAX_MV] = f_code;
    }
    s->me.mv_penalty = mv_penalty;
    s->fcode_tab = fcode_tab;
    /* MPEG-1 escapes carry at most 8/16-bit levels; MPEG-2 has 12 bits */
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        s->min_qcoeff = -255;
        s->max_qcoeff = 255;
    } else {
        s->min_qcoeff = -2047;
        s->max_qcoeff = 2047;
    }
    /* intra blocks may use the MPEG-2 intra VLC; inter always uses MPEG-1 */
    if (s->intra_vlc_format) {
        s->intra_ac_vlc_length =
        s->intra_ac_vlc_last_length = uni_mpeg2_ac_vlc_len;
    } else {
        s->intra_ac_vlc_length =
        s->intra_ac_vlc_last_length = uni_mpeg1_ac_vlc_len;
    }
    s->inter_ac_vlc_length =
    s->inter_ac_vlc_last_length = uni_mpeg1_ac_vlc_len;
}
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM

/* AVOptions shared by the MPEG-1 and MPEG-2 encoders. */
#define COMMON_OPTS \
{ "gop_timecode", "MPEG GOP Timecode in hh:mm:ss[:;.]ff format", \
OFFSET(tc_opt_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, VE },\
{ "intra_vlc", "Use MPEG-2 intra VLC table.", \
OFFSET(intra_vlc_format), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE }, \
{ "drop_frame_timecode", "Timecode is in drop frame format.", \
OFFSET(drop_frame_timecode), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE }, \
{ "scan_offset", "Reserve space for SVCD scan offset user data.", \
OFFSET(scan_offset), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },

static const AVOption mpeg1_options[] = {
    COMMON_OPTS
    FF_MPV_COMMON_OPTS
    { NULL },
};

/* MPEG-2 additionally exposes the non-linear quantizer and alternate scan. */
static const AVOption mpeg2_options[] = {
    COMMON_OPTS
    { "non_linear_quant", "Use nonlinear quantizer.", OFFSET(q_scale_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "alternate_scan", "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};
/* Generates the AVClass for one encoder variant, binding its option table. */
#define mpeg12_class(x) \
static const AVClass mpeg ## x ## _class = { \
.class_name = "mpeg" # x "video encoder", \
.item_name = av_default_item_name, \
.option = mpeg ## x ## _options, \
.version = LIBAVUTIL_VERSION_INT, \
};

mpeg12_class(1)
mpeg12_class(2)
/* MPEG-1 video encoder registration. The first frame-rate table entry is
 * skipped because index 0 is not a valid frame_rate_code. */
AVCodec ff_mpeg1video_encoder = {
    .name                 = "mpeg1video",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_MPEG1VIDEO,
    .priv_data_size       = sizeof(MpegEncContext),
    .init                 = encode_init,
    .encode2              = ff_MPV_encode_picture,
    .close                = ff_MPV_encode_end,
    .supported_framerates = ff_mpeg12_frame_rate_tab + 1,
    .pix_fmts             = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                           AV_PIX_FMT_NONE },
    .capabilities         = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
    .long_name            = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
    .priv_class           = &mpeg1_class,
};

/* MPEG-2 video encoder registration; additionally supports 4:2:2 input
 * and the extended MPEG-2 frame-rate list. */
AVCodec ff_mpeg2video_encoder = {
    .name                 = "mpeg2video",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_MPEG2VIDEO,
    .priv_data_size       = sizeof(MpegEncContext),
    .init                 = encode_init,
    .encode2              = ff_MPV_encode_picture,
    .close                = ff_MPV_encode_end,
    .supported_framerates = ff_mpeg2_frame_rate_tab,
    .pix_fmts             = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                           AV_PIX_FMT_YUV422P,
                                                           AV_PIX_FMT_NONE },
    .capabilities         = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
    .long_name            = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
    .priv_class           = &mpeg2_class,
};