/*
 * MPEG-1/2 encoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * MPEG-1/2 encoder
 */

#include <stdint.h>

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"
#include "libavutil/timecode.h"
#include "libavutil/stereo3d.h"

#include "avcodec.h"
#include "bytestream.h"
#include "mathops.h"
#include "mpeg12.h"
#include "mpeg12data.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "profiles.h"
static const uint8_t svcd_scan_offset_placeholder[] = {
    0x10, 0x0E, 0x00, 0x80, 0x81, 0x00, 0x80,
    0x81, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

static uint8_t mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
static uint8_t fcode_tab[MAX_MV * 2 + 1];

static uint8_t uni_mpeg1_ac_vlc_len[64 * 64 * 2];
static uint8_t uni_mpeg2_ac_vlc_len[64 * 64 * 2];

/* Simple "include everything" tables for DC coefficients: in each entry the
 * low byte is the number of bits and the upper three bytes are the code. */
static uint32_t mpeg1_lum_dc_uni[512];
static uint32_t mpeg1_chr_dc_uni[512];

static uint8_t mpeg1_index_run[2][64];
static int8_t  mpeg1_max_level[2][64];

#define A53_MAX_CC_COUNT 0x1f
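
/* The uni_mpeg1/2_ac_vlc_len tables are filled below with the bit length of
 * every (run, level) AC coefficient combination, indexed with
 * UNI_AC_ENC_INDEX(run, level + 64) and including the cost of escape coding.
 * They are later assigned to s->intra/inter_ac_vlc_length, which the generic
 * mpegvideo encoder uses to estimate bit cost without actually writing bits. */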
static av_cold void init_uni_ac_vlc(RLTable *rl, uint8_t *uni_ac_vlc_len)
{
    int i;

    for (i = 0; i < 128; i++) {
        int level = i - 64;
        int run;

        if (!level)
            continue;

        for (run = 0; run < 64; run++) {
            int len, code;
            int alevel = FFABS(level);

            if (alevel > rl->max_level[0][run])
                code = 111;                         /* rl->n */
            else
                code = rl->index_run[0][run] + alevel - 1;

            if (code < 111) {                       /* rl->n */
                /* length of VLC and sign */
                len = rl->table_vlc[code][1] + 1;
            } else {
                len = rl->table_vlc[111 /* rl->n */][1] + 6;
                if (alevel < 128)
                    len += 8;
                else
                    len += 16;
            }

            uni_ac_vlc_len[UNI_AC_ENC_INDEX(run, i)] = len;
        }
    }
}
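
/* MPEG-1 streams must use one of the frame rates in ff_mpeg12_frame_rate_tab;
 * MPEG-2 may additionally scale a table entry by
 * (frame_rate_extension_n + 1) / (frame_rate_extension_d + 1), i.e. by
 * ext.num / ext.den with ext.num in 1..4 and ext.den in 1..32. The search
 * below picks the table index and extension closest to 1/time_base and
 * reports failure if no exact match exists. */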
static int find_frame_rate_index(MpegEncContext *s)
{
    int i;
    AVRational bestq = (AVRational) {0, 0};
    AVRational ext;
    AVRational target = av_inv_q(s->avctx->time_base);

    for (i = 1; i < 14; i++) {
        if (s->avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL &&
            i >= 9)
            break;

        for (ext.num=1; ext.num <= 4; ext.num++) {
            for (ext.den=1; ext.den <= 32; ext.den++) {
                AVRational q = av_mul_q(ext, ff_mpeg12_frame_rate_tab[i]);

                if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO && (ext.den!=1 || ext.num!=1))
                    continue;
                if (av_gcd(ext.den, ext.num) != 1)
                    continue;

                if (    bestq.num==0
                    || av_nearer_q(target, bestq, q) < 0
                    || ext.num==1 && ext.den==1 && av_nearer_q(target, bestq, q) == 0) {
                    bestq               = q;
                    s->frame_rate_index = i;
                    s->mpeg2_frame_rate_ext.num = ext.num;
                    s->mpeg2_frame_rate_ext.den = ext.den;
                }
            }
        }
    }

    if (av_cmp_q(target, bestq))
        return -1;
    else
        return 0;
}
static av_cold int encode_init(AVCodecContext *avctx)
{
    int ret;
    MpegEncContext *s = avctx->priv_data;

    if ((ret = ff_mpv_encode_init(avctx)) < 0)
        return ret;

    if (find_frame_rate_index(s) < 0) {
        if (s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
            av_log(avctx, AV_LOG_ERROR, "MPEG-1/2 does not support %d/%d fps\n",
                   avctx->time_base.den, avctx->time_base.num);
            return AVERROR(EINVAL);
        } else {
            av_log(avctx, AV_LOG_INFO,
                   "MPEG-1/2 does not support %d/%d fps, there may be AV sync issues\n",
                   avctx->time_base.den, avctx->time_base.num);
        }
    }

    if (avctx->profile == FF_PROFILE_UNKNOWN) {
        if (avctx->level != FF_LEVEL_UNKNOWN) {
            av_log(avctx, AV_LOG_ERROR, "Set profile and level\n");
            return AVERROR(EINVAL);
        }
        /* Main or 4:2:2 */
        avctx->profile = s->chroma_format == CHROMA_420 ? FF_PROFILE_MPEG2_MAIN : FF_PROFILE_MPEG2_422;
    }

    if (avctx->level == FF_LEVEL_UNKNOWN) {
        if (avctx->profile == FF_PROFILE_MPEG2_422) {   /* 4:2:2 */
            if (avctx->width <= 720 && avctx->height <= 608)
                avctx->level = 5;                   /* Main */
            else
                avctx->level = 2;                   /* High */
        } else {
            if (avctx->profile != FF_PROFILE_MPEG2_HIGH && s->chroma_format != CHROMA_420) {
                av_log(avctx, AV_LOG_ERROR,
                       "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n");
                return AVERROR(EINVAL);
            }
            if (avctx->width <= 720 && avctx->height <= 576)
                avctx->level = 8;                   /* Main */
            else if (avctx->width <= 1440)
                avctx->level = 6;                   /* High 1440 */
            else
                avctx->level = 4;                   /* High */
        }
    }

    if ((avctx->width & 0xFFF) == 0 && (avctx->height & 0xFFF) == 1) {
        av_log(avctx, AV_LOG_ERROR, "Width / Height is invalid for MPEG2\n");
        return AVERROR(EINVAL);
    }

    if (s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
        if ((avctx->width & 0xFFF) == 0 || (avctx->height & 0xFFF) == 0) {
            av_log(avctx, AV_LOG_ERROR, "Width or Height are not allowed to be multiples of 4096\n"
                                        "add '-strict %d' if you want to use them anyway.\n", FF_COMPLIANCE_UNOFFICIAL);
            return AVERROR(EINVAL);
        }
    }

    s->drop_frame_timecode = s->drop_frame_timecode || !!(avctx->flags2 & AV_CODEC_FLAG2_DROP_FRAME_TIMECODE);
    if (s->drop_frame_timecode)
        s->tc.flags |= AV_TIMECODE_FLAG_DROPFRAME;
    if (s->drop_frame_timecode && s->frame_rate_index != 4) {
        av_log(avctx, AV_LOG_ERROR,
               "Drop frame time code only allowed with 1001/30000 fps\n");
        return AVERROR(EINVAL);
    }

#if FF_API_PRIVATE_OPT
    FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->timecode_frame_start)
        s->timecode_frame_start = avctx->timecode_frame_start;
    FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (s->tc_opt_str) {
        AVRational rate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];
        int ret = av_timecode_init_from_string(&s->tc, rate, s->tc_opt_str, s);
        if (ret < 0)
            return ret;
        s->drop_frame_timecode  = !!(s->tc.flags & AV_TIMECODE_FLAG_DROPFRAME);
        s->timecode_frame_start = s->tc.start;
    } else {
        s->timecode_frame_start = 0; // default is -1
    }

    return 0;
}
static void put_header(MpegEncContext *s, int header)
{
    align_put_bits(&s->pb);
    put_bits(&s->pb, 16, header >> 16);
    put_sbits(&s->pb, 16, header);
}
/* put sequence header if needed */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
    unsigned int vbv_buffer_size, fps, v;
    int i, constraint_parameter_flag;
    uint64_t time_code;
    int64_t best_aspect_error = INT64_MAX;
    AVRational aspect_ratio = s->avctx->sample_aspect_ratio;

    if (aspect_ratio.num == 0 || aspect_ratio.den == 0)
        aspect_ratio = (AVRational){1,1};           // pixel aspect 1:1 (VGA)

    if (s->current_picture.f->key_frame) {
        AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];

        /* MPEG-1 header repeated every GOP */
        put_header(s, SEQ_START_CODE);

        put_sbits(&s->pb, 12, s->width  & 0xFFF);
        put_sbits(&s->pb, 12, s->height & 0xFFF);
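
        /* aspect_ratio_information: pick the 4-bit table index that comes
         * closest to the requested sample aspect ratio. The MPEG-1 table
         * stores pixel aspect ratios directly, while the MPEG-2 table stores
         * display aspect ratios, hence the extra height/width scaling below. */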
        for (i = 1; i < 15; i++) {
            int64_t error = aspect_ratio.num * (1LL<<32) / aspect_ratio.den;

            if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1)
                error -= (1LL<<32) / ff_mpeg1_aspect[i];
            else
                error -= (1LL<<32)*ff_mpeg2_aspect[i].num * s->height / s->width / ff_mpeg2_aspect[i].den;

            error = FFABS(error);

            if (error - 2 <= best_aspect_error) {
                best_aspect_error    = error;
                s->aspect_ratio_info = i;
            }
        }

        put_bits(&s->pb, 4, s->aspect_ratio_info);
        put_bits(&s->pb, 4, s->frame_rate_index);

        if (s->avctx->rc_max_rate) {
            v = (s->avctx->rc_max_rate + 399) / 400;
            if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
                v = 0x3ffff;
        } else {
            v = 0x3FFFF;
        }

        if (s->avctx->rc_buffer_size)
            vbv_buffer_size = s->avctx->rc_buffer_size;
        else
            /* VBV calculation: Scaled so that a VCD has the proper
             * VBV size of 40 kilobytes */
            vbv_buffer_size = ((20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
        vbv_buffer_size = (vbv_buffer_size + 16383) / 16384;

        put_sbits(&s->pb, 18, v);
        put_bits(&s->pb, 1, 1);         // marker
        put_sbits(&s->pb, 10, vbv_buffer_size);

        constraint_parameter_flag =
            s->width  <= 768                                    &&
            s->height <= 576                                    &&
            s->mb_width * s->mb_height                 <= 396   &&
            s->mb_width * s->mb_height * framerate.num <= 396 * 25 * framerate.den &&
            framerate.num <= framerate.den * 30                 &&
            s->avctx->me_range                                  &&
            s->avctx->me_range < 128                            &&
            vbv_buffer_size <= 20                               &&
            v <= 1856000 / 400                                  &&
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO;

        put_bits(&s->pb, 1, constraint_parameter_flag);

        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

        if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            AVFrameSideData *side_data;
            int width = s->width;
            int height = s->height;
            int use_seq_disp_ext;

            put_header(s, EXT_START_CODE);
            put_bits(&s->pb, 4, 1);                 // seq ext

            put_bits(&s->pb, 1, s->avctx->profile == FF_PROFILE_MPEG2_422); // escx 1 for 4:2:2 profile

            put_bits(&s->pb, 3, s->avctx->profile); // profile
            put_bits(&s->pb, 4, s->avctx->level);   // level

            put_bits(&s->pb, 1, s->progressive_sequence);
            put_bits(&s->pb, 2, s->chroma_format);
            put_bits(&s->pb, 2, s->width  >> 12);
            put_bits(&s->pb, 2, s->height >> 12);
            put_bits(&s->pb, 12, v >> 18);          // bitrate ext
            put_bits(&s->pb, 1, 1);                 // marker
            put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext
            put_bits(&s->pb, 1, s->low_delay);
            put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n
            put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d

            side_data = av_frame_get_side_data(s->current_picture_ptr->f, AV_FRAME_DATA_PANSCAN);
            if (side_data) {
                AVPanScan *pan_scan = (AVPanScan *)side_data->data;
                if (pan_scan->width && pan_scan->height) {
                    width  = pan_scan->width  >> 4;
                    height = pan_scan->height >> 4;
                }
            }

            use_seq_disp_ext = (width != s->width ||
                                height != s->height ||
                                s->avctx->color_primaries != AVCOL_PRI_UNSPECIFIED ||
                                s->avctx->color_trc != AVCOL_TRC_UNSPECIFIED ||
                                s->avctx->colorspace != AVCOL_SPC_UNSPECIFIED ||
                                s->video_format != VIDEO_FORMAT_UNSPECIFIED);

            if (s->seq_disp_ext == 1 || (s->seq_disp_ext == -1 && use_seq_disp_ext)) {
                put_header(s, EXT_START_CODE);
                put_bits(&s->pb, 4, 2);                         // sequence display extension
                put_bits(&s->pb, 3, s->video_format);           // video_format
                put_bits(&s->pb, 1, 1);                         // colour_description
                put_bits(&s->pb, 8, s->avctx->color_primaries); // colour_primaries
                put_bits(&s->pb, 8, s->avctx->color_trc);       // transfer_characteristics
                put_bits(&s->pb, 8, s->avctx->colorspace);      // matrix_coefficients
                put_bits(&s->pb, 14, width);                    // display_horizontal_size
                put_bits(&s->pb, 1, 1);                         // marker_bit
                put_bits(&s->pb, 14, height);                   // display_vertical_size
                put_bits(&s->pb, 3, 0);                         // remaining 3 bits are zero padding
            }
        }

        put_header(s, GOP_START_CODE);
        put_bits(&s->pb, 1, s->drop_frame_timecode);    // drop frame flag
        /* time code: we must convert from the real frame rate to a
         * fake MPEG frame rate in case of low frame rate */
        fps = (framerate.num + framerate.den / 2) / framerate.den;

        time_code = s->current_picture_ptr->f->coded_picture_number +
                    s->timecode_frame_start;

        s->gop_picture_number = s->current_picture_ptr->f->coded_picture_number;

        av_assert0(s->drop_frame_timecode == !!(s->tc.flags & AV_TIMECODE_FLAG_DROPFRAME));
        if (s->drop_frame_timecode)
            time_code = av_timecode_adjust_ntsc_framenum2(time_code, fps);

        put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24));
        put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60));
        put_bits(&s->pb, 1, 1);
        put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60));
        put_bits(&s->pb, 6, (uint32_t)((time_code % fps)));
        put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) || s->intra_only || !s->gop_picture_number);
        put_bits(&s->pb, 1, 0);                     // broken link
    }
}
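
/* macroblock_address_increment can only code skips of up to 33 macroblocks
 * at once; longer runs are prefixed with the 11-bit macroblock_escape code
 * (0x008), each escape standing for 33 skipped macroblocks. */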
static inline void encode_mb_skip_run(MpegEncContext *s, int run)
{
    while (run >= 33) {
        put_bits(&s->pb, 11, 0x008);
        run -= 33;
    }
    put_bits(&s->pb, ff_mpeg12_mbAddrIncrTable[run][1],
             ff_mpeg12_mbAddrIncrTable[run][0]);
}
static av_always_inline void put_qscale(MpegEncContext *s)
{
    put_bits(&s->pb, 5, s->qscale);
}

void ff_mpeg1_encode_slice_header(MpegEncContext *s)
{
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->height > 2800) {
        put_header(s, SLICE_MIN_START_CODE + (s->mb_y & 127));
        /* slice_vertical_position_extension */
        put_bits(&s->pb, 3, s->mb_y >> 7);
    } else {
        put_header(s, SLICE_MIN_START_CODE + s->mb_y);
    }
    put_qscale(s);
    /* slice extra information */
    put_bits(&s->pb, 1, 0);
}
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
{
    AVFrameSideData *side_data;
    mpeg1_encode_sequence_header(s);

    /* MPEG-1 picture header */
    put_header(s, PICTURE_START_CODE);
    /* temporal reference */

    // RAL: s->picture_number instead of s->fake_picture_number
    put_bits(&s->pb, 10,
             (s->picture_number - s->gop_picture_number) & 0x3ff);
    put_bits(&s->pb, 3, s->pict_type);
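
    /* Remember where the 16-bit vbv_delay field sits in the output buffer;
     * it is written as 0xFFFF here and may later be patched with the real
     * value by the rate control code once the frame size is known. */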
    s->vbv_delay_ptr = s->pb.buf + put_bits_count(&s->pb) / 8;
    put_bits(&s->pb, 16, 0xFFFF);               /* vbv_delay */

    // RAL: Forward f_code also needed for B-frames
    if (s->pict_type == AV_PICTURE_TYPE_P ||
        s->pict_type == AV_PICTURE_TYPE_B) {
        put_bits(&s->pb, 1, 0);                 /* half pel coordinates */
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
            put_bits(&s->pb, 3, s->f_code);     /* forward_f_code */
        else
            put_bits(&s->pb, 3, 7);             /* forward_f_code */
    }

    // RAL: Backward f_code necessary for B-frames
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        put_bits(&s->pb, 1, 0);                 /* half pel coordinates */
        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
            put_bits(&s->pb, 3, s->b_code);     /* backward_f_code */
        else
            put_bits(&s->pb, 3, 7);             /* backward_f_code */
    }

    put_bits(&s->pb, 1, 0);                     /* extra bit picture */

    s->frame_pred_frame_dct = 1;
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        put_header(s, EXT_START_CODE);
        put_bits(&s->pb, 4, 8);                 /* pic ext */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->pict_type == AV_PICTURE_TYPE_B) {
            put_bits(&s->pb, 4, s->f_code);
            put_bits(&s->pb, 4, s->f_code);
        } else {
            put_bits(&s->pb, 8, 255);
        }
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            put_bits(&s->pb, 4, s->b_code);
            put_bits(&s->pb, 4, s->b_code);
        } else {
            put_bits(&s->pb, 8, 255);
        }
        put_bits(&s->pb, 2, s->intra_dc_precision);

        av_assert0(s->picture_structure == PICT_FRAME);
        put_bits(&s->pb, 2, s->picture_structure);
        if (s->progressive_sequence)
            put_bits(&s->pb, 1, 0);             /* no repeat */
        else
            put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first);
        /* XXX: optimize the generation of this flag with entropy measures */
        s->frame_pred_frame_dct = s->progressive_sequence;

        put_bits(&s->pb, 1, s->frame_pred_frame_dct);
        put_bits(&s->pb, 1, s->concealment_motion_vectors);
        put_bits(&s->pb, 1, s->q_scale_type);
        put_bits(&s->pb, 1, s->intra_vlc_format);
        put_bits(&s->pb, 1, s->alternate_scan);
        put_bits(&s->pb, 1, s->repeat_first_field);
        s->progressive_frame = s->progressive_sequence;
        /* chroma_420_type */
        put_bits(&s->pb, 1, s->chroma_format ==
                            CHROMA_420 ? s->progressive_frame : 0);
        put_bits(&s->pb, 1, s->progressive_frame);
        put_bits(&s->pb, 1, 0);                 /* composite_display_flag */
    }
    if (s->scan_offset) {
        int i;

        put_header(s, USER_START_CODE);
        for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++)
            put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
    }
    side_data = av_frame_get_side_data(s->current_picture_ptr->f,
                                       AV_FRAME_DATA_STEREO3D);
    if (side_data) {
        AVStereo3D *stereo = (AVStereo3D *)side_data->data;
        uint8_t fpa_type;

        switch (stereo->type) {
        case AV_STEREO3D_SIDEBYSIDE:
            fpa_type = 0x03;
            break;
        case AV_STEREO3D_TOPBOTTOM:
            fpa_type = 0x04;
            break;
        case AV_STEREO3D_2D:
            fpa_type = 0x08;
            break;
        case AV_STEREO3D_SIDEBYSIDE_QUINCUNX:
            fpa_type = 0x23;
            break;
        default:
            fpa_type = 0;
            break;
        }

        if (fpa_type != 0) {
            put_header(s, USER_START_CODE);
            put_bits(&s->pb, 8, 'J');       // S3D_video_format_signaling_identifier
            put_bits(&s->pb, 8, 'P');
            put_bits(&s->pb, 8, '3');
            put_bits(&s->pb, 8, 'D');
            put_bits(&s->pb, 8, 0x03);      // S3D_video_format_length

            put_bits(&s->pb, 1, 1);         // reserved_bit
            put_bits(&s->pb, 7, fpa_type);  // S3D_video_format_type
            put_bits(&s->pb, 8, 0x04);      // reserved_data[0]
            put_bits(&s->pb, 8, 0xFF);      // reserved_data[1]
        }
    }

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->a53_cc) {
        side_data = av_frame_get_side_data(s->current_picture_ptr->f,
                                           AV_FRAME_DATA_A53_CC);
        if (side_data) {
            if (side_data->size <= A53_MAX_CC_COUNT * 3 && side_data->size % 3 == 0) {
                int i = 0;

                put_header(s, USER_START_CODE);

                put_bits(&s->pb, 8, 'G');                   // user_identifier
                put_bits(&s->pb, 8, 'A');
                put_bits(&s->pb, 8, '9');
                put_bits(&s->pb, 8, '4');
                put_bits(&s->pb, 8, 3);                     // user_data_type_code
                put_bits(&s->pb, 8,
                         (side_data->size / 3 & A53_MAX_CC_COUNT) | 0x40); // flags, cc_count
                put_bits(&s->pb, 8, 0xff);                  // em_data

                for (i = 0; i < side_data->size; i++)
                    put_bits(&s->pb, 8, side_data->data[i]);

                put_bits(&s->pb, 8, 0xff);                  // marker_bits
            } else {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Warning Closed Caption size (%d) can not exceed 93 bytes "
                       "and must be a multiple of 3\n", side_data->size);
            }
        }
    }

    s->mb_y = 0;
    ff_mpeg1_encode_slice_header(s);
}
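
/* Write the macroblock_type VLC; for MPEG-2 pictures that do not use
 * frame_pred_frame_dct, this is followed by the 2-bit frame/field
 * motion_type (when motion vectors are present) and the dct_type flag. */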
static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
                                int has_mv, int field_motion)
{
    put_bits(&s->pb, n, bits);
    if (!s->frame_pred_frame_dct) {
        if (has_mv)
            /* motion_type: frame/field */
            put_bits(&s->pb, 2, 2 - field_motion);
        put_bits(&s->pb, 1, s->interlaced_dct);
    }
}
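
/* Motion vector differences are coded with modulo wrap-around: the value is
 * first folded into the range allowed by f_code (sign_extend to 5 + bit_size
 * bits) and then split into a VLC-coded magnitude, a sign bit, and bit_size
 * extra residual bits. */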
// RAL: Parameter added: f_or_b_code
static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
{
    if (val == 0) {
        /* zero vector */
        put_bits(&s->pb,
                 ff_mpeg12_mbMotionVectorTable[0][1],
                 ff_mpeg12_mbMotionVectorTable[0][0]);
    } else {
        int code, sign, bits;
        int bit_size = f_or_b_code - 1;
        int range    = 1 << bit_size;
        /* modulo encoding */
        val = sign_extend(val, 5 + bit_size);

        if (val >= 0) {
            val--;
            code = (val >> bit_size) + 1;
            bits = val & (range - 1);
            sign = 0;
        } else {
            val = -val;
            val--;
            code = (val >> bit_size) + 1;
            bits = val & (range - 1);
            sign = 1;
        }

        av_assert2(code > 0 && code <= 16);

        put_bits(&s->pb,
                 ff_mpeg12_mbMotionVectorTable[code][1],
                 ff_mpeg12_mbMotionVectorTable[code][0]);

        put_bits(&s->pb, 1, sign);
        if (bit_size > 0)
            put_bits(&s->pb, bit_size, bits);
    }
}
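
/* DC differences in [-255, 255] are written straight from the precomputed
 * mpeg1_lum_dc_uni / mpeg1_chr_dc_uni tables; larger differences (possible
 * with MPEG-2 intra_dc_precision above 8 bits) fall back to computing the
 * size category and residual bits on the fly. */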
static inline void encode_dc(MpegEncContext *s, int diff, int component)
{
    unsigned int diff_u = diff + 255;
    if (diff_u >= 511) {
        int index;

        if (diff < 0) {
            index = av_log2_16bit(-2 * diff);
            diff--;
        } else {
            index = av_log2_16bit(2 * diff);
        }
        if (component == 0)
            put_bits(&s->pb,
                     ff_mpeg12_vlc_dc_lum_bits[index] + index,
                     (ff_mpeg12_vlc_dc_lum_code[index] << index) +
                     av_mod_uintp2(diff, index));
        else
            put_bits(&s->pb,
                     ff_mpeg12_vlc_dc_chroma_bits[index] + index,
                     (ff_mpeg12_vlc_dc_chroma_code[index] << index) +
                     av_mod_uintp2(diff, index));
    } else {
        if (component == 0)
            put_bits(&s->pb,
                     mpeg1_lum_dc_uni[diff + 255] & 0xFF,
                     mpeg1_lum_dc_uni[diff + 255] >> 8);
        else
            put_bits(&s->pb,
                     mpeg1_chr_dc_uni[diff + 255] & 0xFF,
                     mpeg1_chr_dc_uni[diff + 255] >> 8);
    }
}
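
/* In ff_rl_mpeg1 / ff_rl_mpeg2, index 111 (rl->n) is the escape code and
 * index 112 is the end-of-block code; both indices are hard-coded below. */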
static void mpeg1_encode_block(MpegEncContext *s, int16_t *block, int n)
{
    int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
    int code, component;
    const uint16_t (*table_vlc)[2] = ff_rl_mpeg1.table_vlc;

    last_index = s->block_last_index[n];

    /* DC coef */
    if (s->mb_intra) {
        component = (n <= 3 ? 0 : (n & 1) + 1);
        dc        = block[0];                   /* overflow is impossible */
        diff      = dc - s->last_dc[component];
        encode_dc(s, diff, component);
        s->last_dc[component] = dc;
        i = 1;
        if (s->intra_vlc_format)
            table_vlc = ff_rl_mpeg2.table_vlc;
    } else {
        /* encode the first coefficient: needs to be done here because
         * it is handled slightly differently */
        level = block[0];
        if (abs(level) == 1) {
            code = ((uint32_t)level >> 31);     /* the sign bit */
            put_bits(&s->pb, 2, code | 0x02);
            i = 1;
        } else {
            i = 0;
            last_non_zero = -1;
            goto next_coef;
        }
    }

    /* now encode the AC coefficients */
    last_non_zero = i - 1;
    for (; i <= last_index; i++) {
        j     = s->intra_scantable.permutated[i];
        level = block[j];

next_coef:
        /* encode using VLC */
        if (level != 0) {
            run = i - last_non_zero - 1;

            alevel = level;
            MASK_ABS(sign, alevel);
            sign &= 1;

            if (alevel <= mpeg1_max_level[0][run]) {
                code = mpeg1_index_run[0][run] + alevel - 1;
                /* store the VLC & sign at once */
                put_bits(&s->pb, table_vlc[code][1] + 1,
                         (table_vlc[code][0] << 1) + sign);
            } else {
                /* escape seems to be pretty rare <5% so I do not optimize it */
                put_bits(&s->pb, table_vlc[111][1], table_vlc[111][0]);
                /* escape: only clip in this case */
                put_bits(&s->pb, 6, run);
                if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                    if (alevel < 128) {
                        put_sbits(&s->pb, 8, level);
                    } else {
                        if (level < 0)
                            put_bits(&s->pb, 16, 0x8001 + level + 255);
                        else
                            put_sbits(&s->pb, 16, level);
                    }
                } else {
                    put_sbits(&s->pb, 12, level);
                }
            }
            last_non_zero = i;
        }
    }
    /* end of block */
    put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]);
}
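
/* A macroblock may only be coded as "skipped" when it has no coded
 * coefficients, uses plain 16x16 motion, and is not the first macroblock of
 * a slice; for MPEG-2 it must also not be the last macroblock of its row
 * (each row starts a new slice), and for MPEG-1 not the last macroblock of
 * the picture. In addition the prediction must be trivial: zero motion in
 * P-frames, or the same direction and vectors as the previous macroblock in
 * B-frames. */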
static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
                                                      int16_t block[8][64],
                                                      int motion_x, int motion_y,
                                                      int mb_block_count)
{
    int i, cbp;
    const int mb_x     = s->mb_x;
    const int mb_y     = s->mb_y;
    const int first_mb = mb_x == s->resync_mb_x && mb_y == s->resync_mb_y;

    /* compute cbp */
    cbp = 0;
    for (i = 0; i < mb_block_count; i++)
        if (s->block_last_index[i] >= 0)
            cbp |= 1 << (mb_block_count - 1 - i);

    if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
        (mb_x != s->mb_width - 1 ||
         (mb_y != s->end_mb_y - 1 && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)) &&
        ((s->pict_type == AV_PICTURE_TYPE_P && (motion_x | motion_y) == 0) ||
         (s->pict_type == AV_PICTURE_TYPE_B && s->mv_dir == s->last_mv_dir &&
          (((s->mv_dir & MV_DIR_FORWARD)
            ? ((s->mv[0][0][0] - s->last_mv[0][0][0]) |
               (s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
           ((s->mv_dir & MV_DIR_BACKWARD)
            ? ((s->mv[1][0][0] - s->last_mv[1][0][0]) |
               (s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
        s->mb_skip_run++;
        s->qscale -= s->dquant;
        s->skip_count++;
        s->misc_bits++;
        s->last_bits++;
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            s->last_mv[0][0][0] =
            s->last_mv[0][0][1] =
            s->last_mv[0][1][0] =
            s->last_mv[0][1][1] = 0;
        }
    } else {
        if (first_mb) {
            av_assert0(s->mb_skip_run == 0);
            encode_mb_skip_run(s, s->mb_x);
        } else {
            encode_mb_skip_run(s, s->mb_skip_run);
        }

        if (s->pict_type == AV_PICTURE_TYPE_I) {
            if (s->dquant && cbp) {
                /* macroblock_type: macroblock_quant = 1 */
                put_mb_modes(s, 2, 1, 0, 0);
                put_qscale(s);
            } else {
                /* macroblock_type: macroblock_quant = 0 */
                put_mb_modes(s, 1, 1, 0, 0);
                s->qscale -= s->dquant;
            }
            s->misc_bits += get_bits_diff(s);
            s->i_count++;
        } else if (s->mb_intra) {
            if (s->dquant && cbp) {
                put_mb_modes(s, 6, 0x01, 0, 0);
                put_qscale(s);
            } else {
                put_mb_modes(s, 5, 0x03, 0, 0);
                s->qscale -= s->dquant;
            }
            s->misc_bits += get_bits_diff(s);
            s->i_count++;
            memset(s->last_mv, 0, sizeof(s->last_mv));
        } else if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (s->mv_type == MV_TYPE_16X16) {
                if (cbp != 0) {
                    if ((motion_x | motion_y) == 0) {
                        if (s->dquant) {
                            /* macroblock_pattern & quant */
                            put_mb_modes(s, 5, 1, 0, 0);
                            put_qscale(s);
                        } else {
                            /* macroblock_pattern only */
                            put_mb_modes(s, 2, 1, 0, 0);
                        }
                        s->misc_bits += get_bits_diff(s);
                    } else {
                        if (s->dquant) {
                            put_mb_modes(s, 5, 2, 1, 0);    /* motion + cbp */
                            put_qscale(s);
                        } else {
                            put_mb_modes(s, 1, 1, 1, 0);    /* motion + cbp */
                        }
                        s->misc_bits += get_bits_diff(s);
                        // RAL: f_code parameter added
                        mpeg1_encode_motion(s,
                                            motion_x - s->last_mv[0][0][0],
                                            s->f_code);
                        // RAL: f_code parameter added
                        mpeg1_encode_motion(s,
                                            motion_y - s->last_mv[0][0][1],
                                            s->f_code);
                        s->mv_bits += get_bits_diff(s);
                    }
                } else {
                    put_bits(&s->pb, 3, 1);         /* motion only */
                    if (!s->frame_pred_frame_dct)
                        put_bits(&s->pb, 2, 2);     /* motion_type: frame */
                    s->misc_bits += get_bits_diff(s);
                    // RAL: f_code parameter added
                    mpeg1_encode_motion(s,
                                        motion_x - s->last_mv[0][0][0],
                                        s->f_code);
                    // RAL: f_code parameter added
                    mpeg1_encode_motion(s,
                                        motion_y - s->last_mv[0][0][1],
                                        s->f_code);
                    s->qscale  -= s->dquant;
                    s->mv_bits += get_bits_diff(s);
                }
                s->last_mv[0][1][0] = s->last_mv[0][0][0] = motion_x;
                s->last_mv[0][1][1] = s->last_mv[0][0][1] = motion_y;
            } else {
                av_assert2(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD);

                if (cbp) {
                    if (s->dquant) {
                        put_mb_modes(s, 5, 2, 1, 1);    /* motion + cbp */
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 1, 1, 1, 1);    /* motion + cbp */
                    }
                } else {
                    put_bits(&s->pb, 3, 1);             /* motion only */
                    put_bits(&s->pb, 2, 1);             /* motion_type: field */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                for (i = 0; i < 2; i++) {
                    put_bits(&s->pb, 1, s->field_select[0][i]);
                    mpeg1_encode_motion(s,
                                        s->mv[0][i][0] - s->last_mv[0][i][0],
                                        s->f_code);
                    mpeg1_encode_motion(s,
                                        s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1),
                                        s->f_code);
                    s->last_mv[0][i][0] = s->mv[0][i][0];
                    s->last_mv[0][i][1] = 2 * s->mv[0][i][1];
                }
                s->mv_bits += get_bits_diff(s);
            }
            if (cbp) {
                if (s->chroma_y_shift) {
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp][1],
                             ff_mpeg12_mbPatTable[cbp][0]);
                } else {
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp >> 2][1],
                             ff_mpeg12_mbPatTable[cbp >> 2][0]);
                    put_sbits(&s->pb, 2, cbp);
                }
            }
            s->f_count++;
        } else {
            if (s->mv_type == MV_TYPE_16X16) {
                if (cbp) {                      // With coded block pattern
                    if (s->dquant) {
                        if (s->mv_dir == MV_DIR_FORWARD)
                            put_mb_modes(s, 6, 3, 1, 0);
                        else
                            put_mb_modes(s, 8 - s->mv_dir, 2, 1, 0);
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 5 - s->mv_dir, 3, 1, 0);
                    }
                } else {                        // No coded block pattern
                    put_bits(&s->pb, 5 - s->mv_dir, 2);
                    if (!s->frame_pred_frame_dct)
                        put_bits(&s->pb, 2, 2); /* motion_type: frame */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                if (s->mv_dir & MV_DIR_FORWARD) {
                    mpeg1_encode_motion(s,
                                        s->mv[0][0][0] - s->last_mv[0][0][0],
                                        s->f_code);
                    mpeg1_encode_motion(s,
                                        s->mv[0][0][1] - s->last_mv[0][0][1],
                                        s->f_code);
                    s->last_mv[0][0][0] =
                    s->last_mv[0][1][0] = s->mv[0][0][0];
                    s->last_mv[0][0][1] =
                    s->last_mv[0][1][1] = s->mv[0][0][1];
                    s->f_count++;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    mpeg1_encode_motion(s,
                                        s->mv[1][0][0] - s->last_mv[1][0][0],
                                        s->b_code);
                    mpeg1_encode_motion(s,
                                        s->mv[1][0][1] - s->last_mv[1][0][1],
                                        s->b_code);
                    s->last_mv[1][0][0] =
                    s->last_mv[1][1][0] = s->mv[1][0][0];
                    s->last_mv[1][0][1] =
                    s->last_mv[1][1][1] = s->mv[1][0][1];
                    s->b_count++;
                }
            } else {
                av_assert2(s->mv_type == MV_TYPE_FIELD);
                av_assert2(!s->frame_pred_frame_dct);
                if (cbp) {                      // With coded block pattern
                    if (s->dquant) {
                        if (s->mv_dir == MV_DIR_FORWARD)
                            put_mb_modes(s, 6, 3, 1, 1);
                        else
                            put_mb_modes(s, 8 - s->mv_dir, 2, 1, 1);
                        put_qscale(s);
                    } else {
                        put_mb_modes(s, 5 - s->mv_dir, 3, 1, 1);
                    }
                } else {                        // No coded block pattern
                    put_bits(&s->pb, 5 - s->mv_dir, 2);
                    put_bits(&s->pb, 2, 1);     /* motion_type: field */
                    s->qscale -= s->dquant;
                }
                s->misc_bits += get_bits_diff(s);
                if (s->mv_dir & MV_DIR_FORWARD) {
                    for (i = 0; i < 2; i++) {
                        put_bits(&s->pb, 1, s->field_select[0][i]);
                        mpeg1_encode_motion(s,
                                            s->mv[0][i][0] - s->last_mv[0][i][0],
                                            s->f_code);
                        mpeg1_encode_motion(s,
                                            s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1),
                                            s->f_code);
                        s->last_mv[0][i][0] = s->mv[0][i][0];
                        s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
                    }
                    s->f_count++;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    for (i = 0; i < 2; i++) {
                        put_bits(&s->pb, 1, s->field_select[1][i]);
                        mpeg1_encode_motion(s,
                                            s->mv[1][i][0] - s->last_mv[1][i][0],
                                            s->b_code);
                        mpeg1_encode_motion(s,
                                            s->mv[1][i][1] - (s->last_mv[1][i][1] >> 1),
                                            s->b_code);
                        s->last_mv[1][i][0] = s->mv[1][i][0];
                        s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
                    }
                    s->b_count++;
                }
            }
            s->mv_bits += get_bits_diff(s);
            if (cbp) {
                if (s->chroma_y_shift) {
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp][1],
                             ff_mpeg12_mbPatTable[cbp][0]);
                } else {
                    put_bits(&s->pb,
                             ff_mpeg12_mbPatTable[cbp >> 2][1],
                             ff_mpeg12_mbPatTable[cbp >> 2][0]);
                    put_sbits(&s->pb, 2, cbp);
                }
            }
        }
        for (i = 0; i < mb_block_count; i++)
            if (cbp & (1 << (mb_block_count - 1 - i)))
                mpeg1_encode_block(s, block[i], i);
        s->mb_skip_run = 0;
        if (s->mb_intra)
            s->i_tex_bits += get_bits_diff(s);
        else
            s->p_tex_bits += get_bits_diff(s);
    }
}
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64],
                        int motion_x, int motion_y)
{
    if (s->chroma_format == CHROMA_420)
        mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6);
    else
        mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8);
}
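
/* One-time tables for the generic mpegvideo encoder: mv_penalty[f_code][...]
 * holds the bit cost of coding a given motion vector difference (used when
 * estimating motion), and fcode_tab[...] maps a motion vector magnitude to
 * the smallest f_code able to represent it. */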
static av_cold void mpeg12_encode_init_static(void)
{
    int f_code;
    int mv;
    int i;

    ff_rl_init(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
    ff_rl_init(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);

    for (i = 0; i < 64; i++) {
        mpeg1_max_level[0][i] = ff_rl_mpeg1.max_level[0][i];
        mpeg1_index_run[0][i] = ff_rl_mpeg1.index_run[0][i];
    }

    init_uni_ac_vlc(&ff_rl_mpeg1, uni_mpeg1_ac_vlc_len);
    init_uni_ac_vlc(&ff_rl_mpeg2, uni_mpeg2_ac_vlc_len);

    /* build unified dc encoding tables */
    for (i = -255; i < 256; i++) {
        int adiff, index;
        int bits, code;
        int diff = i;

        adiff = FFABS(diff);
        if (diff < 0)
            diff--;
        index = av_log2(2 * adiff);

        bits = ff_mpeg12_vlc_dc_lum_bits[index] + index;
        code = (ff_mpeg12_vlc_dc_lum_code[index] << index) +
               av_mod_uintp2(diff, index);
        mpeg1_lum_dc_uni[i + 255] = bits + (code << 8);

        bits = ff_mpeg12_vlc_dc_chroma_bits[index] + index;
        code = (ff_mpeg12_vlc_dc_chroma_code[index] << index) +
               av_mod_uintp2(diff, index);
        mpeg1_chr_dc_uni[i + 255] = bits + (code << 8);
    }

    for (f_code = 1; f_code <= MAX_FCODE; f_code++)
        for (mv = -MAX_DMV; mv <= MAX_DMV; mv++) {
            int len;

            if (mv == 0) {
                len = ff_mpeg12_mbMotionVectorTable[0][1];
            } else {
                int val, bit_size, code;

                bit_size = f_code - 1;

                val = mv;
                if (val < 0)
                    val = -val;
                val--;
                code = (val >> bit_size) + 1;
                if (code < 17)
                    len = ff_mpeg12_mbMotionVectorTable[code][1] +
                          1 + bit_size;
                else
                    len = ff_mpeg12_mbMotionVectorTable[16][1] +
                          2 + bit_size;
            }

            mv_penalty[f_code][mv + MAX_DMV] = len;
        }

    for (f_code = MAX_FCODE; f_code > 0; f_code--)
        for (mv = -(8 << f_code); mv < (8 << f_code); mv++)
            fcode_tab[mv + MAX_MV] = f_code;
}
av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
{
    static AVOnce init_static_once = AV_ONCE_INIT;

    ff_mpeg12_common_init(s);

    s->me.mv_penalty = mv_penalty;
    s->fcode_tab     = fcode_tab;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        s->min_qcoeff = -255;
        s->max_qcoeff = 255;
    } else {
        s->min_qcoeff = -2047;
        s->max_qcoeff = 2047;
    }
    if (s->intra_vlc_format) {
        s->intra_ac_vlc_length      =
        s->intra_ac_vlc_last_length = uni_mpeg2_ac_vlc_len;
    } else {
        s->intra_ac_vlc_length      =
        s->intra_ac_vlc_last_length = uni_mpeg1_ac_vlc_len;
    }
    s->inter_ac_vlc_length      =
    s->inter_ac_vlc_last_length = uni_mpeg1_ac_vlc_len;

    ff_thread_once(&init_static_once, mpeg12_encode_init_static);
}
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define COMMON_OPTS                                                            \
    { "gop_timecode",         "MPEG GOP Timecode in hh:mm:ss[:;.]ff format. Overrides timecode_frame_start.", \
      OFFSET(tc_opt_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE },         \
    { "intra_vlc",            "Use MPEG-2 intra VLC table.",                   \
      OFFSET(intra_vlc_format),    AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
    { "drop_frame_timecode",  "Timecode is in drop frame format.",             \
      OFFSET(drop_frame_timecode), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
    { "scan_offset",          "Reserve space for SVCD scan offset user data.", \
      OFFSET(scan_offset),         AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
    { "timecode_frame_start", "GOP timecode frame start number, in non-drop-frame format", \
      OFFSET(timecode_frame_start), AV_OPT_TYPE_INT64, {.i64 = -1 }, -1, INT64_MAX, VE}, \

static const AVOption mpeg1_options[] = {
    COMMON_OPTS
    FF_MPV_COMMON_OPTS
    { NULL },
};
static const AVOption mpeg2_options[] = {
    COMMON_OPTS
    { "non_linear_quant", "Use nonlinear quantizer.",    OFFSET(q_scale_type),   AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "alternate_scan",   "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "seq_disp_ext",     "Write sequence_display_extension blocks.", OFFSET(seq_disp_ext), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE, "seq_disp_ext" },
    {     "auto",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, "seq_disp_ext" },
    {     "never",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, VE, "seq_disp_ext" },
    {     "always", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, VE, "seq_disp_ext" },
    { "video_format",     "Video_format in the sequence_display_extension indicating the source of the video.", OFFSET(video_format), AV_OPT_TYPE_INT, { .i64 = VIDEO_FORMAT_UNSPECIFIED }, 0, 7, VE, "video_format" },
    {     "component",   NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_FORMAT_COMPONENT  }, 0, 0, VE, "video_format" },
    {     "pal",         NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_FORMAT_PAL        }, 0, 0, VE, "video_format" },
    {     "ntsc",        NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_FORMAT_NTSC       }, 0, 0, VE, "video_format" },
    {     "secam",       NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_FORMAT_SECAM      }, 0, 0, VE, "video_format" },
    {     "mac",         NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_FORMAT_MAC        }, 0, 0, VE, "video_format" },
    {     "unspecified", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_FORMAT_UNSPECIFIED}, 0, 0, VE, "video_format" },
#define LEVEL(name, value) name, NULL, 0, AV_OPT_TYPE_CONST, { .i64 = value }, 0, 0, VE, "avctx.level"
    { LEVEL("high",     4) },
    { LEVEL("high1440", 6) },
    { LEVEL("main",     8) },
    { LEVEL("low",     10) },
#undef LEVEL
    FF_MPV_COMMON_OPTS
    FF_MPEG2_PROFILE_OPTS
    { NULL },
};
#define mpeg12_class(x)                                     \
static const AVClass mpeg ## x ## _class = {                \
    .class_name = "mpeg" # x "video encoder",               \
    .item_name  = av_default_item_name,                     \
    .option     = mpeg ## x ## _options,                    \
    .version    = LIBAVUTIL_VERSION_INT,                    \
};

mpeg12_class(1)
mpeg12_class(2)
AVCodec ff_mpeg1video_encoder = {
    .name                 = "mpeg1video",
    .long_name            = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_MPEG1VIDEO,
    .priv_data_size       = sizeof(MpegEncContext),
    .init                 = encode_init,
    .encode2              = ff_mpv_encode_picture,
    .close                = ff_mpv_encode_end,
    .supported_framerates = ff_mpeg12_frame_rate_tab + 1,
    .pix_fmts             = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                           AV_PIX_FMT_NONE },
    .capabilities         = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal        = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class           = &mpeg1_class,
};

AVCodec ff_mpeg2video_encoder = {
    .name                 = "mpeg2video",
    .long_name            = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_MPEG2VIDEO,
    .priv_data_size       = sizeof(MpegEncContext),
    .init                 = encode_init,
    .encode2              = ff_mpv_encode_picture,
    .close                = ff_mpv_encode_end,
    .supported_framerates = ff_mpeg2_frame_rate_tab,
    .pix_fmts             = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                           AV_PIX_FMT_YUV422P,
                                                           AV_PIX_FMT_NONE },
    .capabilities         = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal        = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class           = &mpeg2_class,
};