  1. /*
  2. * MPEG1 codec / MPEG2 decoder
  3. * Copyright (c) 2000,2001 Fabrice Bellard.
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file mpeg12.c
  24. * MPEG1/2 codec
  25. */
  26. //#define DEBUG
  27. #include "avcodec.h"
  28. #include "dsputil.h"
  29. #include "mpegvideo.h"
  30. #include "mpeg12data.h"
  31. #include "bytestream.h"
  32. //#undef NDEBUG
  33. //#include <assert.h>
  34. #define DC_VLC_BITS 9
  35. #define MV_VLC_BITS 9
  36. #define MBINCR_VLC_BITS 9
  37. #define MB_PAT_VLC_BITS 9
  38. #define MB_PTYPE_VLC_BITS 6
  39. #define MB_BTYPE_VLC_BITS 6
  40. #define TEX_VLC_BITS 9
  41. #ifdef CONFIG_ENCODERS
  42. static void mpeg1_encode_block(MpegEncContext *s,
  43. DCTELEM *block,
  44. int component);
  45. static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code); // RAL: f_code parameter added
  46. #endif //CONFIG_ENCODERS
  47. static inline int mpeg1_decode_block_inter(MpegEncContext *s,
  48. DCTELEM *block,
  49. int n);
  50. static inline int mpeg1_decode_block_intra(MpegEncContext *s,
  51. DCTELEM *block,
  52. int n);
  53. static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n);
  54. static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
  55. DCTELEM *block,
  56. int n);
  57. static inline int mpeg2_decode_block_intra(MpegEncContext *s,
  58. DCTELEM *block,
  59. int n);
  60. static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n);
  61. static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
  62. static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred);
  63. static void exchange_uv(MpegEncContext *s);
  64. extern int XVMC_field_start(MpegEncContext *s, AVCodecContext *avctx);
  65. extern int XVMC_field_end(MpegEncContext *s);
  66. extern void XVMC_pack_pblocks(MpegEncContext *s,int cbp);
  67. extern void XVMC_init_block(MpegEncContext *s);//set s->block
  68. static const enum PixelFormat pixfmt_yuv_420[]= {PIX_FMT_YUV420P,-1};
  69. static const enum PixelFormat pixfmt_yuv_422[]= {PIX_FMT_YUV422P,-1};
  70. static const enum PixelFormat pixfmt_yuv_444[]= {PIX_FMT_YUV444P,-1};
  71. static const enum PixelFormat pixfmt_xvmc_mpg2_420[] = {
  72. PIX_FMT_XVMC_MPEG2_IDCT,
  73. PIX_FMT_XVMC_MPEG2_MC,
  74. -1};
  75. #ifdef CONFIG_ENCODERS
  76. static uint8_t mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
  77. static uint8_t fcode_tab[MAX_MV*2+1];
  78. static uint8_t uni_mpeg1_ac_vlc_len [64*64*2];
  79. static uint8_t uni_mpeg2_ac_vlc_len [64*64*2];
80. /* simple include-everything table for DC: the low byte is the number of bits, the next 3 bytes are the code */
  81. static uint32_t mpeg1_lum_dc_uni[512];
  82. static uint32_t mpeg1_chr_dc_uni[512];
  83. static uint8_t mpeg1_index_run[2][64];
  84. static int8_t mpeg1_max_level[2][64];
  85. #endif //CONFIG_ENCODERS
  86. static uint8_t static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
  87. static void init_2d_vlc_rl(RLTable *rl, int use_static)
  88. {
  89. int i;
  90. init_vlc(&rl->vlc, TEX_VLC_BITS, rl->n + 2,
  91. &rl->table_vlc[0][1], 4, 2,
  92. &rl->table_vlc[0][0], 4, 2, use_static);
  93. if(use_static)
  94. rl->rl_vlc[0]= av_mallocz_static(rl->vlc.table_size*sizeof(RL_VLC_ELEM));
  95. else
  96. rl->rl_vlc[0]= av_malloc(rl->vlc.table_size*sizeof(RL_VLC_ELEM));
  97. for(i=0; i<rl->vlc.table_size; i++){
  98. int code= rl->vlc.table[i][0];
  99. int len = rl->vlc.table[i][1];
  100. int level, run;
  101. if(len==0){ // illegal code
  102. run= 65;
  103. level= MAX_LEVEL;
  104. }else if(len<0){ //more bits needed
  105. run= 0;
  106. level= code;
  107. }else{
  108. if(code==rl->n){ //esc
  109. run= 65;
  110. level= 0;
  111. }else if(code==rl->n+1){ //eob
  112. run= 0;
  113. level= 127;
  114. }else{
  115. run= rl->table_run [code] + 1;
  116. level= rl->table_level[code];
  117. }
  118. }
  119. rl->rl_vlc[0][i].len= len;
  120. rl->rl_vlc[0][i].level= level;
  121. rl->rl_vlc[0][i].run= run;
  122. }
  123. }
  124. #ifdef CONFIG_ENCODERS
  125. static void init_uni_ac_vlc(RLTable *rl, uint8_t *uni_ac_vlc_len){
  126. int i;
  127. for(i=0; i<128; i++){
  128. int level= i-64;
  129. int run;
  130. for(run=0; run<64; run++){
  131. int len, bits, code;
  132. int alevel= FFABS(level);
  133. int sign= (level>>31)&1;
  134. if (alevel > rl->max_level[0][run])
  135. code= 111; /*rl->n*/
  136. else
  137. code= rl->index_run[0][run] + alevel - 1;
  138. if (code < 111 /* rl->n */) {
  139. /* store the vlc & sign at once */
  140. len= rl->table_vlc[code][1]+1;
  141. bits= (rl->table_vlc[code][0]<<1) + sign;
  142. } else {
  143. len= rl->table_vlc[111/*rl->n*/][1]+6;
  144. bits= rl->table_vlc[111/*rl->n*/][0]<<6;
  145. bits|= run;
  146. if (alevel < 128) {
  147. bits<<=8; len+=8;
  148. bits|= level & 0xff;
  149. } else {
  150. bits<<=16; len+=16;
  151. bits|= level & 0xff;
  152. if (level < 0) {
  153. bits|= 0x8001 + level + 255;
  154. } else {
  155. bits|= level & 0xffff;
  156. }
  157. }
  158. }
  159. uni_ac_vlc_len [UNI_AC_ENC_INDEX(run, i)]= len;
  160. }
  161. }
  162. }
  163. static int find_frame_rate_index(MpegEncContext *s){
  164. int i;
  165. int64_t dmin= INT64_MAX;
  166. int64_t d;
  167. for(i=1;i<14;i++) {
  168. int64_t n0= 1001LL/ff_frame_rate_tab[i].den*ff_frame_rate_tab[i].num*s->avctx->time_base.num;
  169. int64_t n1= 1001LL*s->avctx->time_base.den;
  170. if(s->avctx->strict_std_compliance > FF_COMPLIANCE_INOFFICIAL && i>=9) break;
  171. d = FFABS(n0 - n1);
  172. if(d < dmin){
  173. dmin=d;
  174. s->frame_rate_index= i;
  175. }
  176. }
  177. if(dmin)
  178. return -1;
  179. else
  180. return 0;
  181. }
  182. static int encode_init(AVCodecContext *avctx)
  183. {
  184. MpegEncContext *s = avctx->priv_data;
  185. if(MPV_encode_init(avctx) < 0)
  186. return -1;
  187. if(find_frame_rate_index(s) < 0){
  188. if(s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){
  189. av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n", avctx->time_base.den, avctx->time_base.num);
  190. return -1;
  191. }else{
  192. av_log(avctx, AV_LOG_INFO, "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n", avctx->time_base.den, avctx->time_base.num);
  193. }
  194. }
  195. if(avctx->profile == FF_PROFILE_UNKNOWN){
  196. if(avctx->level != FF_LEVEL_UNKNOWN){
  197. av_log(avctx, AV_LOG_ERROR, "Set profile and level\n");
  198. return -1;
  199. }
  200. avctx->profile = s->chroma_format == CHROMA_420 ? 4 : 0; /* Main or 4:2:2 */
  201. }
  202. if(avctx->level == FF_LEVEL_UNKNOWN){
  203. if(avctx->profile == 0){ /* 4:2:2 */
  204. if(avctx->width <= 720 && avctx->height <= 608) avctx->level = 5; /* Main */
  205. else avctx->level = 2; /* High */
  206. }else{
  207. if(avctx->profile != 1 && s->chroma_format != CHROMA_420){
  208. av_log(avctx, AV_LOG_ERROR, "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n");
  209. return -1;
  210. }
  211. if(avctx->width <= 720 && avctx->height <= 576) avctx->level = 8; /* Main */
  212. else if(avctx->width <= 1440) avctx->level = 6; /* High 1440 */
  213. else avctx->level = 4; /* High */
  214. }
  215. }
  216. if((avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) && s->frame_rate_index != 4){
  217. av_log(avctx, AV_LOG_ERROR, "Drop frame time code only allowed with 1001/30000 fps\n");
  218. return -1;
  219. }
  220. return 0;
  221. }
  222. static void put_header(MpegEncContext *s, int header)
  223. {
  224. align_put_bits(&s->pb);
  225. put_bits(&s->pb, 16, header>>16);
  226. put_bits(&s->pb, 16, header&0xFFFF);
  227. }
  228. /* put sequence header if needed */
  229. static void mpeg1_encode_sequence_header(MpegEncContext *s)
  230. {
  231. unsigned int vbv_buffer_size;
  232. unsigned int fps, v;
  233. int i;
  234. uint64_t time_code;
  235. float best_aspect_error= 1E10;
  236. float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio);
  237. int constraint_parameter_flag;
  238. if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)
  239. if (s->current_picture.key_frame) {
  240. AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];
  241. /* mpeg1 header repeated every gop */
  242. put_header(s, SEQ_START_CODE);
  243. put_bits(&s->pb, 12, s->width);
  244. put_bits(&s->pb, 12, s->height);
  245. for(i=1; i<15; i++){
  246. float error= aspect_ratio;
  247. if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1)
  248. error-= 1.0/mpeg1_aspect[i];
  249. else
  250. error-= av_q2d(mpeg2_aspect[i])*s->height/s->width;
  251. error= FFABS(error);
  252. if(error < best_aspect_error){
  253. best_aspect_error= error;
  254. s->aspect_ratio_info= i;
  255. }
  256. }
  257. put_bits(&s->pb, 4, s->aspect_ratio_info);
  258. put_bits(&s->pb, 4, s->frame_rate_index);
  259. if(s->avctx->rc_max_rate){
  260. v = (s->avctx->rc_max_rate + 399) / 400;
  261. if (v > 0x3ffff && s->codec_id == CODEC_ID_MPEG1VIDEO)
  262. v = 0x3ffff;
  263. }else{
  264. v= 0x3FFFF;
  265. }
  266. if(s->avctx->rc_buffer_size)
  267. vbv_buffer_size = s->avctx->rc_buffer_size;
  268. else
  269. /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */
  270. vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
  271. vbv_buffer_size= (vbv_buffer_size + 16383) / 16384;
  272. put_bits(&s->pb, 18, v & 0x3FFFF);
  273. put_bits(&s->pb, 1, 1); /* marker */
  274. put_bits(&s->pb, 10, vbv_buffer_size & 0x3FF);
  275. constraint_parameter_flag=
  276. s->width <= 768 && s->height <= 576 &&
  277. s->mb_width * s->mb_height <= 396 &&
  278. s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 &&
  279. framerate.num <= framerate.den*30 &&
  280. s->avctx->me_range && s->avctx->me_range < 128 &&
  281. vbv_buffer_size <= 20 &&
  282. v <= 1856000/400 &&
  283. s->codec_id == CODEC_ID_MPEG1VIDEO;
  284. put_bits(&s->pb, 1, constraint_parameter_flag);
  285. ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
  286. ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);
  287. if(s->codec_id == CODEC_ID_MPEG2VIDEO){
  288. put_header(s, EXT_START_CODE);
  289. put_bits(&s->pb, 4, 1); //seq ext
290. put_bits(&s->pb, 1, s->avctx->profile == 0); // escape bit: 1 for 4:2:2 profile
  291. put_bits(&s->pb, 3, s->avctx->profile); //profile
  292. put_bits(&s->pb, 4, s->avctx->level); //level
  293. put_bits(&s->pb, 1, s->progressive_sequence);
  294. put_bits(&s->pb, 2, s->chroma_format);
  295. put_bits(&s->pb, 2, 0); //horizontal size ext
  296. put_bits(&s->pb, 2, 0); //vertical size ext
  297. put_bits(&s->pb, 12, v>>18); //bitrate ext
  298. put_bits(&s->pb, 1, 1); //marker
  299. put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext
  300. put_bits(&s->pb, 1, s->low_delay);
  301. put_bits(&s->pb, 2, 0); // frame_rate_ext_n
  302. put_bits(&s->pb, 5, 0); // frame_rate_ext_d
  303. }
  304. put_header(s, GOP_START_CODE);
  305. put_bits(&s->pb, 1, !!(s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE)); /* drop frame flag */
  306. /* time code : we must convert from the real frame rate to a
  307. fake mpeg frame rate in case of low frame rate */
  308. fps = (framerate.num + framerate.den/2)/ framerate.den;
  309. time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start;
  310. s->gop_picture_number = s->current_picture_ptr->coded_picture_number;
  311. if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) {
  312. /* only works for NTSC 29.97 */
  313. int d = time_code / 17982;
  314. int m = time_code % 17982;
  315. //if (m < 2) m += 2; /* not needed since -2,-1 / 1798 in C returns 0 */
  316. time_code += 18 * d + 2 * ((m - 2) / 1798);
  317. }
  318. put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24));
  319. put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60));
  320. put_bits(&s->pb, 1, 1);
  321. put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60));
  322. put_bits(&s->pb, 6, (uint32_t)((time_code % fps)));
  323. put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP));
  324. put_bits(&s->pb, 1, 0); /* broken link */
  325. }
  326. }
  327. static inline void encode_mb_skip_run(MpegEncContext *s, int run){
  328. while (run >= 33) {
  329. put_bits(&s->pb, 11, 0x008);
  330. run -= 33;
  331. }
  332. put_bits(&s->pb, mbAddrIncrTable[run][1],
  333. mbAddrIncrTable[run][0]);
  334. }
  335. #endif //CONFIG_ENCODERS
  336. static void common_init(MpegEncContext *s)
  337. {
  338. s->y_dc_scale_table=
  339. s->c_dc_scale_table= mpeg2_dc_scale_table[s->intra_dc_precision];
  340. }
  341. void ff_mpeg1_clean_buffers(MpegEncContext *s){
  342. s->last_dc[0] = 1 << (7 + s->intra_dc_precision);
  343. s->last_dc[1] = s->last_dc[0];
  344. s->last_dc[2] = s->last_dc[0];
  345. memset(s->last_mv, 0, sizeof(s->last_mv));
  346. }
  347. #ifdef CONFIG_ENCODERS
  348. static av_always_inline void put_qscale(MpegEncContext *s)
  349. {
  350. if(s->q_scale_type){
  351. assert(s->qscale>=1 && s->qscale <=12);
  352. put_bits(&s->pb, 5, inv_non_linear_qscale[s->qscale]);
  353. }else{
  354. put_bits(&s->pb, 5, s->qscale);
  355. }
  356. }
  357. void ff_mpeg1_encode_slice_header(MpegEncContext *s){
  358. put_header(s, SLICE_MIN_START_CODE + s->mb_y);
  359. put_qscale(s);
  360. put_bits(&s->pb, 1, 0); /* slice extra information */
  361. }
  362. void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
  363. {
  364. mpeg1_encode_sequence_header(s);
  365. /* mpeg1 picture header */
  366. put_header(s, PICTURE_START_CODE);
  367. /* temporal reference */
  368. // RAL: s->picture_number instead of s->fake_picture_number
  369. put_bits(&s->pb, 10, (s->picture_number -
  370. s->gop_picture_number) & 0x3ff);
  371. put_bits(&s->pb, 3, s->pict_type);
  372. s->vbv_delay_ptr= s->pb.buf + put_bits_count(&s->pb)/8;
  373. put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */
  374. // RAL: Forward f_code also needed for B frames
  375. if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
  376. put_bits(&s->pb, 1, 0); /* half pel coordinates */
  377. if(s->codec_id == CODEC_ID_MPEG1VIDEO)
  378. put_bits(&s->pb, 3, s->f_code); /* forward_f_code */
  379. else
  380. put_bits(&s->pb, 3, 7); /* forward_f_code */
  381. }
  382. // RAL: Backward f_code necessary for B frames
  383. if (s->pict_type == B_TYPE) {
  384. put_bits(&s->pb, 1, 0); /* half pel coordinates */
  385. if(s->codec_id == CODEC_ID_MPEG1VIDEO)
  386. put_bits(&s->pb, 3, s->b_code); /* backward_f_code */
  387. else
  388. put_bits(&s->pb, 3, 7); /* backward_f_code */
  389. }
  390. put_bits(&s->pb, 1, 0); /* extra bit picture */
  391. s->frame_pred_frame_dct = 1;
  392. if(s->codec_id == CODEC_ID_MPEG2VIDEO){
  393. put_header(s, EXT_START_CODE);
  394. put_bits(&s->pb, 4, 8); //pic ext
  395. if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
  396. put_bits(&s->pb, 4, s->f_code);
  397. put_bits(&s->pb, 4, s->f_code);
  398. }else{
  399. put_bits(&s->pb, 8, 255);
  400. }
  401. if (s->pict_type == B_TYPE) {
  402. put_bits(&s->pb, 4, s->b_code);
  403. put_bits(&s->pb, 4, s->b_code);
  404. }else{
  405. put_bits(&s->pb, 8, 255);
  406. }
  407. put_bits(&s->pb, 2, s->intra_dc_precision);
  408. assert(s->picture_structure == PICT_FRAME);
  409. put_bits(&s->pb, 2, s->picture_structure);
  410. if (s->progressive_sequence) {
  411. put_bits(&s->pb, 1, 0); /* no repeat */
  412. } else {
  413. put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
  414. }
  415. /* XXX: optimize the generation of this flag with entropy
  416. measures */
  417. s->frame_pred_frame_dct = s->progressive_sequence;
  418. put_bits(&s->pb, 1, s->frame_pred_frame_dct);
  419. put_bits(&s->pb, 1, s->concealment_motion_vectors);
  420. put_bits(&s->pb, 1, s->q_scale_type);
  421. put_bits(&s->pb, 1, s->intra_vlc_format);
  422. put_bits(&s->pb, 1, s->alternate_scan);
  423. put_bits(&s->pb, 1, s->repeat_first_field);
  424. s->progressive_frame = s->progressive_sequence;
  425. put_bits(&s->pb, 1, s->chroma_format == CHROMA_420 ? s->progressive_frame : 0); /* chroma_420_type */
  426. put_bits(&s->pb, 1, s->progressive_frame);
  427. put_bits(&s->pb, 1, 0); //composite_display_flag
  428. }
  429. if(s->flags & CODEC_FLAG_SVCD_SCAN_OFFSET){
  430. int i;
  431. put_header(s, USER_START_CODE);
  432. for(i=0; i<sizeof(svcd_scan_offset_placeholder); i++){
  433. put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
  434. }
  435. }
  436. s->mb_y=0;
  437. ff_mpeg1_encode_slice_header(s);
  438. }
  439. static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
  440. int has_mv, int field_motion)
  441. {
  442. put_bits(&s->pb, n, bits);
  443. if (!s->frame_pred_frame_dct) {
  444. if (has_mv)
  445. put_bits(&s->pb, 2, 2 - field_motion); /* motion_type: frame/field */
  446. put_bits(&s->pb, 1, s->interlaced_dct);
  447. }
  448. }
  449. static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
  450. DCTELEM block[6][64],
  451. int motion_x, int motion_y,
  452. int mb_block_count)
  453. {
  454. int i, cbp;
  455. const int mb_x = s->mb_x;
  456. const int mb_y = s->mb_y;
  457. const int first_mb= mb_x == s->resync_mb_x && mb_y == s->resync_mb_y;
  458. /* compute cbp */
  459. cbp = 0;
  460. for(i=0;i<mb_block_count;i++) {
  461. if (s->block_last_index[i] >= 0)
  462. cbp |= 1 << (mb_block_count - 1 - i);
  463. }
  464. if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
  465. (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) &&
  466. ((s->pict_type == P_TYPE && (motion_x | motion_y) == 0) ||
  467. (s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
  468. ((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
  469. s->mb_skip_run++;
  470. s->qscale -= s->dquant;
  471. s->skip_count++;
  472. s->misc_bits++;
  473. s->last_bits++;
  474. if(s->pict_type == P_TYPE){
  475. s->last_mv[0][1][0]= s->last_mv[0][0][0]=
  476. s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0;
  477. }
  478. } else {
  479. if(first_mb){
  480. assert(s->mb_skip_run == 0);
  481. encode_mb_skip_run(s, s->mb_x);
  482. }else{
  483. encode_mb_skip_run(s, s->mb_skip_run);
  484. }
  485. if (s->pict_type == I_TYPE) {
  486. if(s->dquant && cbp){
  487. put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */
  488. put_qscale(s);
  489. }else{
  490. put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */
  491. s->qscale -= s->dquant;
  492. }
  493. s->misc_bits+= get_bits_diff(s);
  494. s->i_count++;
  495. } else if (s->mb_intra) {
  496. if(s->dquant && cbp){
  497. put_mb_modes(s, 6, 0x01, 0, 0);
  498. put_qscale(s);
  499. }else{
  500. put_mb_modes(s, 5, 0x03, 0, 0);
  501. s->qscale -= s->dquant;
  502. }
  503. s->misc_bits+= get_bits_diff(s);
  504. s->i_count++;
  505. memset(s->last_mv, 0, sizeof(s->last_mv));
  506. } else if (s->pict_type == P_TYPE) {
  507. if(s->mv_type == MV_TYPE_16X16){
  508. if (cbp != 0) {
  509. if ((motion_x|motion_y) == 0) {
  510. if(s->dquant){
  511. put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */
  512. put_qscale(s);
  513. }else{
  514. put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */
  515. }
  516. s->misc_bits+= get_bits_diff(s);
  517. } else {
  518. if(s->dquant){
  519. put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */
  520. put_qscale(s);
  521. }else{
  522. put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */
  523. }
  524. s->misc_bits+= get_bits_diff(s);
  525. mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added
  526. mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added
  527. s->mv_bits+= get_bits_diff(s);
  528. }
  529. } else {
  530. put_bits(&s->pb, 3, 1); /* motion only */
  531. if (!s->frame_pred_frame_dct)
  532. put_bits(&s->pb, 2, 2); /* motion_type: frame */
  533. s->misc_bits+= get_bits_diff(s);
  534. mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added
  535. mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added
  536. s->qscale -= s->dquant;
  537. s->mv_bits+= get_bits_diff(s);
  538. }
  539. s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x;
  540. s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y;
  541. }else{
  542. assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD);
  543. if (cbp) {
  544. if(s->dquant){
  545. put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */
  546. put_qscale(s);
  547. }else{
  548. put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */
  549. }
  550. } else {
  551. put_bits(&s->pb, 3, 1); /* motion only */
  552. put_bits(&s->pb, 2, 1); /* motion_type: field */
  553. s->qscale -= s->dquant;
  554. }
  555. s->misc_bits+= get_bits_diff(s);
  556. for(i=0; i<2; i++){
  557. put_bits(&s->pb, 1, s->field_select[0][i]);
  558. mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
  559. mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code);
  560. s->last_mv[0][i][0]= s->mv[0][i][0];
  561. s->last_mv[0][i][1]= 2*s->mv[0][i][1];
  562. }
  563. s->mv_bits+= get_bits_diff(s);
  564. }
  565. if(cbp) {
  566. if (s->chroma_y_shift) {
  567. put_bits(&s->pb, mbPatTable[cbp][1], mbPatTable[cbp][0]);
  568. } else {
  569. put_bits(&s->pb, mbPatTable[cbp>>2][1], mbPatTable[cbp>>2][0]);
  570. put_bits(&s->pb, 2, cbp & 3);
  571. }
  572. }
  573. s->f_count++;
  574. } else{
575. static const int mb_type_len[4]={0,3,4,2}; // backward, forward, bidirectional
  576. if(s->mv_type == MV_TYPE_16X16){
577. if (cbp){ // With coded block pattern
  578. if (s->dquant) {
  579. if(s->mv_dir == MV_DIR_FORWARD)
  580. put_mb_modes(s, 6, 3, 1, 0);
  581. else
  582. put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 0);
  583. put_qscale(s);
  584. } else {
  585. put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 0);
  586. }
587. }else{ // No coded block pattern
  588. put_bits(&s->pb, mb_type_len[s->mv_dir], 2);
  589. if (!s->frame_pred_frame_dct)
  590. put_bits(&s->pb, 2, 2); /* motion_type: frame */
  591. s->qscale -= s->dquant;
  592. }
  593. s->misc_bits += get_bits_diff(s);
  594. if (s->mv_dir&MV_DIR_FORWARD){
  595. mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
  596. mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
  597. s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0];
  598. s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1];
  599. s->f_count++;
  600. }
  601. if (s->mv_dir&MV_DIR_BACKWARD){
  602. mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
  603. mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
  604. s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0];
  605. s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1];
  606. s->b_count++;
  607. }
  608. }else{
  609. assert(s->mv_type == MV_TYPE_FIELD);
  610. assert(!s->frame_pred_frame_dct);
611. if (cbp){ // With coded block pattern
  612. if (s->dquant) {
  613. if(s->mv_dir == MV_DIR_FORWARD)
  614. put_mb_modes(s, 6, 3, 1, 1);
  615. else
  616. put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 1);
  617. put_qscale(s);
  618. } else {
  619. put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 1);
  620. }
621. }else{ // No coded block pattern
  622. put_bits(&s->pb, mb_type_len[s->mv_dir], 2);
  623. put_bits(&s->pb, 2, 1); /* motion_type: field */
  624. s->qscale -= s->dquant;
  625. }
  626. s->misc_bits += get_bits_diff(s);
  627. if (s->mv_dir&MV_DIR_FORWARD){
  628. for(i=0; i<2; i++){
  629. put_bits(&s->pb, 1, s->field_select[0][i]);
  630. mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
  631. mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code);
  632. s->last_mv[0][i][0]= s->mv[0][i][0];
  633. s->last_mv[0][i][1]= 2*s->mv[0][i][1];
  634. }
  635. s->f_count++;
  636. }
  637. if (s->mv_dir&MV_DIR_BACKWARD){
  638. for(i=0; i<2; i++){
  639. put_bits(&s->pb, 1, s->field_select[1][i]);
  640. mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code);
  641. mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code);
  642. s->last_mv[1][i][0]= s->mv[1][i][0];
  643. s->last_mv[1][i][1]= 2*s->mv[1][i][1];
  644. }
  645. s->b_count++;
  646. }
  647. }
  648. s->mv_bits += get_bits_diff(s);
  649. if(cbp) {
  650. if (s->chroma_y_shift) {
  651. put_bits(&s->pb, mbPatTable[cbp][1], mbPatTable[cbp][0]);
  652. } else {
  653. put_bits(&s->pb, mbPatTable[cbp>>2][1], mbPatTable[cbp>>2][0]);
  654. put_bits(&s->pb, 2, cbp & 3);
  655. }
  656. }
  657. }
  658. for(i=0;i<mb_block_count;i++) {
  659. if (cbp & (1 << (mb_block_count - 1 - i))) {
  660. mpeg1_encode_block(s, block[i], i);
  661. }
  662. }
  663. s->mb_skip_run = 0;
  664. if(s->mb_intra)
  665. s->i_tex_bits+= get_bits_diff(s);
  666. else
  667. s->p_tex_bits+= get_bits_diff(s);
  668. }
  669. }
  670. void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y)
  671. {
  672. if (s->chroma_format == CHROMA_420) mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6);
  673. else mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8);
  674. }
  675. // RAL: Parameter added: f_or_b_code
  676. static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
  677. {
  678. int code, bit_size, l, bits, range, sign;
  679. if (val == 0) {
  680. /* zero vector */
  681. code = 0;
  682. put_bits(&s->pb,
  683. mbMotionVectorTable[0][1],
  684. mbMotionVectorTable[0][0]);
  685. } else {
  686. bit_size = f_or_b_code - 1;
  687. range = 1 << bit_size;
  688. /* modulo encoding */
  689. l= INT_BIT - 5 - bit_size;
  690. val= (val<<l)>>l;
  691. if (val >= 0) {
  692. val--;
  693. code = (val >> bit_size) + 1;
  694. bits = val & (range - 1);
  695. sign = 0;
  696. } else {
  697. val = -val;
  698. val--;
  699. code = (val >> bit_size) + 1;
  700. bits = val & (range - 1);
  701. sign = 1;
  702. }
  703. assert(code > 0 && code <= 16);
  704. put_bits(&s->pb,
  705. mbMotionVectorTable[code][1],
  706. mbMotionVectorTable[code][0]);
  707. put_bits(&s->pb, 1, sign);
  708. if (bit_size > 0) {
  709. put_bits(&s->pb, bit_size, bits);
  710. }
  711. }
  712. }
  713. void ff_mpeg1_encode_init(MpegEncContext *s)
  714. {
  715. static int done=0;
  716. common_init(s);
  717. if(!done){
  718. int f_code;
  719. int mv;
  720. int i;
  721. done=1;
  722. init_rl(&rl_mpeg1, static_rl_table_store[0]);
  723. init_rl(&rl_mpeg2, static_rl_table_store[1]);
  724. for(i=0; i<64; i++)
  725. {
  726. mpeg1_max_level[0][i]= rl_mpeg1.max_level[0][i];
  727. mpeg1_index_run[0][i]= rl_mpeg1.index_run[0][i];
  728. }
  729. init_uni_ac_vlc(&rl_mpeg1, uni_mpeg1_ac_vlc_len);
  730. if(s->intra_vlc_format)
  731. init_uni_ac_vlc(&rl_mpeg2, uni_mpeg2_ac_vlc_len);
  732. /* build unified dc encoding tables */
  733. for(i=-255; i<256; i++)
  734. {
  735. int adiff, index;
  736. int bits, code;
  737. int diff=i;
  738. adiff = FFABS(diff);
  739. if(diff<0) diff--;
  740. index = av_log2(2*adiff);
  741. bits= vlc_dc_lum_bits[index] + index;
  742. code= (vlc_dc_lum_code[index]<<index) + (diff & ((1 << index) - 1));
  743. mpeg1_lum_dc_uni[i+255]= bits + (code<<8);
  744. bits= vlc_dc_chroma_bits[index] + index;
  745. code= (vlc_dc_chroma_code[index]<<index) + (diff & ((1 << index) - 1));
  746. mpeg1_chr_dc_uni[i+255]= bits + (code<<8);
  747. }
  748. for(f_code=1; f_code<=MAX_FCODE; f_code++){
  749. for(mv=-MAX_MV; mv<=MAX_MV; mv++){
  750. int len;
  751. if(mv==0) len= mbMotionVectorTable[0][1];
  752. else{
  753. int val, bit_size, range, code;
  754. bit_size = f_code - 1;
  755. range = 1 << bit_size;
  756. val=mv;
  757. if (val < 0)
  758. val = -val;
  759. val--;
  760. code = (val >> bit_size) + 1;
  761. if(code<17){
  762. len= mbMotionVectorTable[code][1] + 1 + bit_size;
  763. }else{
  764. len= mbMotionVectorTable[16][1] + 2 + bit_size;
  765. }
  766. }
  767. mv_penalty[f_code][mv+MAX_MV]= len;
  768. }
  769. }
  770. for(f_code=MAX_FCODE; f_code>0; f_code--){
  771. for(mv=-(8<<f_code); mv<(8<<f_code); mv++){
  772. fcode_tab[mv+MAX_MV]= f_code;
  773. }
  774. }
  775. }
  776. s->me.mv_penalty= mv_penalty;
  777. s->fcode_tab= fcode_tab;
  778. if(s->codec_id == CODEC_ID_MPEG1VIDEO){
  779. s->min_qcoeff=-255;
  780. s->max_qcoeff= 255;
  781. }else{
  782. s->min_qcoeff=-2047;
  783. s->max_qcoeff= 2047;
  784. }
  785. if (s->intra_vlc_format) {
  786. s->intra_ac_vlc_length=
  787. s->intra_ac_vlc_last_length= uni_mpeg2_ac_vlc_len;
  788. } else {
  789. s->intra_ac_vlc_length=
  790. s->intra_ac_vlc_last_length= uni_mpeg1_ac_vlc_len;
  791. }
  792. s->inter_ac_vlc_length=
  793. s->inter_ac_vlc_last_length= uni_mpeg1_ac_vlc_len;
  794. }
  795. static inline void encode_dc(MpegEncContext *s, int diff, int component)
  796. {
  797. if(((unsigned) (diff+255)) >= 511){
  798. int index;
  799. if(diff<0){
  800. index= av_log2_16bit(-2*diff);
  801. diff--;
  802. }else{
  803. index= av_log2_16bit(2*diff);
  804. }
  805. if (component == 0) {
  806. put_bits(
  807. &s->pb,
  808. vlc_dc_lum_bits[index] + index,
  809. (vlc_dc_lum_code[index]<<index) + (diff & ((1 << index) - 1)));
  810. }else{
  811. put_bits(
  812. &s->pb,
  813. vlc_dc_chroma_bits[index] + index,
  814. (vlc_dc_chroma_code[index]<<index) + (diff & ((1 << index) - 1)));
  815. }
  816. }else{
  817. if (component == 0) {
  818. put_bits(
  819. &s->pb,
  820. mpeg1_lum_dc_uni[diff+255]&0xFF,
  821. mpeg1_lum_dc_uni[diff+255]>>8);
  822. } else {
  823. put_bits(
  824. &s->pb,
  825. mpeg1_chr_dc_uni[diff+255]&0xFF,
  826. mpeg1_chr_dc_uni[diff+255]>>8);
  827. }
  828. }
  829. }
  830. static void mpeg1_encode_block(MpegEncContext *s,
  831. DCTELEM *block,
  832. int n)
  833. {
  834. int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
  835. int code, component;
  836. const uint16_t (*table_vlc)[2] = rl_mpeg1.table_vlc;
  837. last_index = s->block_last_index[n];
  838. /* DC coef */
  839. if (s->mb_intra) {
  840. component = (n <= 3 ? 0 : (n&1) + 1);
  841. dc = block[0]; /* overflow is impossible */
  842. diff = dc - s->last_dc[component];
  843. encode_dc(s, diff, component);
  844. s->last_dc[component] = dc;
  845. i = 1;
  846. if (s->intra_vlc_format)
  847. table_vlc = rl_mpeg2.table_vlc;
  848. } else {
  849. /* encode the first coefficient : needs to be done here because
  850. it is handled slightly differently */
  851. level = block[0];
  852. if (abs(level) == 1) {
  853. code = ((uint32_t)level >> 31); /* the sign bit */
  854. put_bits(&s->pb, 2, code | 0x02);
  855. i = 1;
  856. } else {
  857. i = 0;
  858. last_non_zero = -1;
  859. goto next_coef;
  860. }
  861. }
862. /* now encode the AC coefficients */
  863. last_non_zero = i - 1;
  864. for(;i<=last_index;i++) {
  865. j = s->intra_scantable.permutated[i];
  866. level = block[j];
  867. next_coef:
  868. #if 0
  869. if (level != 0)
  870. dprintf(s->avctx, "level[%d]=%d\n", i, level);
  871. #endif
  872. /* encode using VLC */
  873. if (level != 0) {
  874. run = i - last_non_zero - 1;
  875. alevel= level;
  876. MASK_ABS(sign, alevel)
  877. sign&=1;
  878. if (alevel <= mpeg1_max_level[0][run]){
  879. code= mpeg1_index_run[0][run] + alevel - 1;
  880. /* store the vlc & sign at once */
  881. put_bits(&s->pb, table_vlc[code][1]+1, (table_vlc[code][0]<<1) + sign);
  882. } else {
  883. /* escape seems to be pretty rare <5% so I do not optimize it */
  884. put_bits(&s->pb, table_vlc[111][1], table_vlc[111][0]);
  885. /* escape: only clip in this case */
  886. put_bits(&s->pb, 6, run);
  887. if(s->codec_id == CODEC_ID_MPEG1VIDEO){
  888. if (alevel < 128) {
  889. put_bits(&s->pb, 8, level & 0xff);
  890. } else {
  891. if (level < 0) {
  892. put_bits(&s->pb, 16, 0x8001 + level + 255);
  893. } else {
  894. put_bits(&s->pb, 16, level & 0xffff);
  895. }
  896. }
  897. }else{
  898. put_bits(&s->pb, 12, level & 0xfff);
  899. }
  900. }
  901. last_non_zero = i;
  902. }
  903. }
  904. /* end of block */
  905. put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]);
  906. }
  907. #endif //CONFIG_ENCODERS
  908. /******************************************/
  909. /* decoding */
  910. static VLC dc_lum_vlc;
  911. static VLC dc_chroma_vlc;
  912. static VLC mv_vlc;
  913. static VLC mbincr_vlc;
  914. static VLC mb_ptype_vlc;
  915. static VLC mb_btype_vlc;
  916. static VLC mb_pat_vlc;
  917. static void init_vlcs(void)
  918. {
  919. static int done = 0;
  920. if (!done) {
  921. done = 1;
  922. init_vlc(&dc_lum_vlc, DC_VLC_BITS, 12,
  923. vlc_dc_lum_bits, 1, 1,
  924. vlc_dc_lum_code, 2, 2, 1);
  925. init_vlc(&dc_chroma_vlc, DC_VLC_BITS, 12,
  926. vlc_dc_chroma_bits, 1, 1,
  927. vlc_dc_chroma_code, 2, 2, 1);
  928. init_vlc(&mv_vlc, MV_VLC_BITS, 17,
  929. &mbMotionVectorTable[0][1], 2, 1,
  930. &mbMotionVectorTable[0][0], 2, 1, 1);
  931. init_vlc(&mbincr_vlc, MBINCR_VLC_BITS, 36,
  932. &mbAddrIncrTable[0][1], 2, 1,
  933. &mbAddrIncrTable[0][0], 2, 1, 1);
  934. init_vlc(&mb_pat_vlc, MB_PAT_VLC_BITS, 64,
  935. &mbPatTable[0][1], 2, 1,
  936. &mbPatTable[0][0], 2, 1, 1);
  937. init_vlc(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 7,
  938. &table_mb_ptype[0][1], 2, 1,
  939. &table_mb_ptype[0][0], 2, 1, 1);
  940. init_vlc(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11,
  941. &table_mb_btype[0][1], 2, 1,
  942. &table_mb_btype[0][0], 2, 1, 1);
  943. init_rl(&rl_mpeg1, static_rl_table_store[0]);
  944. init_rl(&rl_mpeg2, static_rl_table_store[1]);
  945. init_2d_vlc_rl(&rl_mpeg1, 1);
  946. init_2d_vlc_rl(&rl_mpeg2, 1);
  947. }
  948. }
  949. static inline int get_dmv(MpegEncContext *s)
  950. {
  951. if(get_bits1(&s->gb))
  952. return 1 - (get_bits1(&s->gb) << 1);
  953. else
  954. return 0;
  955. }
  956. static inline int get_qscale(MpegEncContext *s)
  957. {
  958. int qscale = get_bits(&s->gb, 5);
  959. if (s->q_scale_type) {
  960. return non_linear_qscale[qscale];
  961. } else {
  962. return qscale << 1;
  963. }
  964. }
  965. /* motion type (for mpeg2) */
  966. #define MT_FIELD 1
  967. #define MT_FRAME 2
  968. #define MT_16X8 2
  969. #define MT_DMV 3
  970. static int mpeg_decode_mb(MpegEncContext *s,
  971. DCTELEM block[12][64])
  972. {
  973. int i, j, k, cbp, val, mb_type, motion_type;
  974. const int mb_block_count = 4 + (1<< s->chroma_format);
  975. dprintf(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
  976. assert(s->mb_skipped==0);
  977. if (s->mb_skip_run-- != 0) {
  978. if(s->pict_type == I_TYPE){
  979. av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
  980. return -1;
  981. }
  982. /* skip mb */
  983. s->mb_intra = 0;
  984. for(i=0;i<12;i++)
  985. s->block_last_index[i] = -1;
  986. if(s->picture_structure == PICT_FRAME)
  987. s->mv_type = MV_TYPE_16X16;
  988. else
  989. s->mv_type = MV_TYPE_FIELD;
  990. if (s->pict_type == P_TYPE) {
  991. /* if P type, zero motion vector is implied */
  992. s->mv_dir = MV_DIR_FORWARD;
  993. s->mv[0][0][0] = s->mv[0][0][1] = 0;
  994. s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
  995. s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
  996. s->field_select[0][0]= s->picture_structure - 1;
  997. s->mb_skipped = 1;
  998. s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
  999. } else {
  1000. int mb_type;
  1001. if(s->mb_x)
  1002. mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
  1003. else
  1004. mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in mpeg at all,
  1005. if(IS_INTRA(mb_type))
  1006. return -1;
  1007. /* if B type, reuse previous vectors and directions */
  1008. s->mv[0][0][0] = s->last_mv[0][0][0];
  1009. s->mv[0][0][1] = s->last_mv[0][0][1];
  1010. s->mv[1][0][0] = s->last_mv[1][0][0];
  1011. s->mv[1][0][1] = s->last_mv[1][0][1];
  1012. s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
  1013. mb_type | MB_TYPE_SKIP;
  1014. // assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
  1015. if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
  1016. s->mb_skipped = 1;
  1017. }
  1018. return 0;
  1019. }
  1020. switch(s->pict_type) {
  1021. default:
  1022. case I_TYPE:
  1023. if (get_bits1(&s->gb) == 0) {
  1024. if (get_bits1(&s->gb) == 0){
  1025. av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
  1026. return -1;
  1027. }
  1028. mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
  1029. } else {
  1030. mb_type = MB_TYPE_INTRA;
  1031. }
  1032. break;
  1033. case P_TYPE:
  1034. mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
  1035. if (mb_type < 0){
  1036. av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
  1037. return -1;
  1038. }
  1039. mb_type = ptype2mb_type[ mb_type ];
  1040. break;
  1041. case B_TYPE:
  1042. mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
  1043. if (mb_type < 0){
  1044. av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
  1045. return -1;
  1046. }
  1047. mb_type = btype2mb_type[ mb_type ];
  1048. break;
  1049. }
  1050. dprintf(s->avctx, "mb_type=%x\n", mb_type);
  1051. // motion_type = 0; /* avoid warning */
  1052. if (IS_INTRA(mb_type)) {
  1053. s->dsp.clear_blocks(s->block[0]);
  1054. if(!s->chroma_y_shift){
  1055. s->dsp.clear_blocks(s->block[6]);
  1056. }
  1057. /* compute dct type */
  1058. if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var?
  1059. !s->frame_pred_frame_dct) {
  1060. s->interlaced_dct = get_bits1(&s->gb);
  1061. }
  1062. if (IS_QUANT(mb_type))
  1063. s->qscale = get_qscale(s);
  1064. if (s->concealment_motion_vectors) {
  1065. /* just parse them */
  1066. if (s->picture_structure != PICT_FRAME)
  1067. skip_bits1(&s->gb); /* field select */
  1068. s->mv[0][0][0]= s->last_mv[0][0][0]= s->last_mv[0][1][0] =
  1069. mpeg_decode_motion(s, s->mpeg_f_code[0][0], s->last_mv[0][0][0]);
  1070. s->mv[0][0][1]= s->last_mv[0][0][1]= s->last_mv[0][1][1] =
  1071. mpeg_decode_motion(s, s->mpeg_f_code[0][1], s->last_mv[0][0][1]);
  1072. skip_bits1(&s->gb); /* marker */
  1073. }else
  1074. memset(s->last_mv, 0, sizeof(s->last_mv)); /* reset mv prediction */
  1075. s->mb_intra = 1;
  1076. #ifdef HAVE_XVMC
1077. // when xvmc_acceleration > 1 we memcpy blocks in xvmcvideo
  1078. if(s->avctx->xvmc_acceleration > 1){
  1079. XVMC_pack_pblocks(s,-1);//inter are always full blocks
  1080. if(s->swap_uv){
  1081. exchange_uv(s);
  1082. }
  1083. }
  1084. #endif
  1085. if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
  1086. if(s->flags2 & CODEC_FLAG2_FAST){
  1087. for(i=0;i<6;i++) {
  1088. mpeg2_fast_decode_block_intra(s, s->pblocks[i], i);
  1089. }
  1090. }else{
  1091. for(i=0;i<mb_block_count;i++) {
  1092. if (mpeg2_decode_block_intra(s, s->pblocks[i], i) < 0)
  1093. return -1;
  1094. }
  1095. }
  1096. } else {
  1097. for(i=0;i<6;i++) {
  1098. if (mpeg1_decode_block_intra(s, s->pblocks[i], i) < 0)
  1099. return -1;
  1100. }
  1101. }
  1102. } else {
  1103. if (mb_type & MB_TYPE_ZERO_MV){
  1104. assert(mb_type & MB_TYPE_CBP);
  1105. /* compute dct type */
  1106. if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var?
  1107. !s->frame_pred_frame_dct) {
  1108. s->interlaced_dct = get_bits1(&s->gb);
  1109. }
  1110. if (IS_QUANT(mb_type))
  1111. s->qscale = get_qscale(s);
  1112. s->mv_dir = MV_DIR_FORWARD;
  1113. if(s->picture_structure == PICT_FRAME)
  1114. s->mv_type = MV_TYPE_16X16;
  1115. else{
  1116. s->mv_type = MV_TYPE_FIELD;
  1117. mb_type |= MB_TYPE_INTERLACED;
  1118. s->field_select[0][0]= s->picture_structure - 1;
  1119. }
  1120. s->last_mv[0][0][0] = 0;
  1121. s->last_mv[0][0][1] = 0;
  1122. s->last_mv[0][1][0] = 0;
  1123. s->last_mv[0][1][1] = 0;
  1124. s->mv[0][0][0] = 0;
  1125. s->mv[0][0][1] = 0;
  1126. }else{
  1127. assert(mb_type & MB_TYPE_L0L1);
  1128. //FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
  1129. /* get additional motion vector type */
  1130. if (s->frame_pred_frame_dct)
  1131. motion_type = MT_FRAME;
  1132. else{
  1133. motion_type = get_bits(&s->gb, 2);
  1134. }
  1135. /* compute dct type */
  1136. if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var?
  1137. !s->frame_pred_frame_dct && HAS_CBP(mb_type)) {
  1138. s->interlaced_dct = get_bits1(&s->gb);
  1139. }
  1140. if (IS_QUANT(mb_type))
  1141. s->qscale = get_qscale(s);
  1142. /* motion vectors */
  1143. s->mv_dir = 0;
  1144. for(i=0;i<2;i++) {
  1145. if (USES_LIST(mb_type, i)) {
  1146. s->mv_dir |= (MV_DIR_FORWARD >> i);
  1147. dprintf(s->avctx, "motion_type=%d\n", motion_type);
  1148. switch(motion_type) {
  1149. case MT_FRAME: /* or MT_16X8 */
  1150. if (s->picture_structure == PICT_FRAME) {
  1151. /* MT_FRAME */
  1152. mb_type |= MB_TYPE_16x16;
  1153. s->mv_type = MV_TYPE_16X16;
  1154. s->mv[i][0][0]= s->last_mv[i][0][0]= s->last_mv[i][1][0] =
  1155. mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]);
  1156. s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] =
  1157. mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]);
  1158. /* full_pel: only for mpeg1 */
  1159. if (s->full_pel[i]){
  1160. s->mv[i][0][0] <<= 1;
  1161. s->mv[i][0][1] <<= 1;
  1162. }
  1163. } else {
  1164. /* MT_16X8 */
  1165. mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
  1166. s->mv_type = MV_TYPE_16X8;
  1167. for(j=0;j<2;j++) {
  1168. s->field_select[i][j] = get_bits1(&s->gb);
  1169. for(k=0;k<2;k++) {
  1170. val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
  1171. s->last_mv[i][j][k]);
  1172. s->last_mv[i][j][k] = val;
  1173. s->mv[i][j][k] = val;
  1174. }
  1175. }
  1176. }
  1177. break;
  1178. case MT_FIELD:
  1179. s->mv_type = MV_TYPE_FIELD;
  1180. if (s->picture_structure == PICT_FRAME) {
  1181. mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
  1182. for(j=0;j<2;j++) {
  1183. s->field_select[i][j] = get_bits1(&s->gb);
  1184. val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
  1185. s->last_mv[i][j][0]);
  1186. s->last_mv[i][j][0] = val;
  1187. s->mv[i][j][0] = val;
  1188. dprintf(s->avctx, "fmx=%d\n", val);
  1189. val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
  1190. s->last_mv[i][j][1] >> 1);
  1191. s->last_mv[i][j][1] = val << 1;
  1192. s->mv[i][j][1] = val;
  1193. dprintf(s->avctx, "fmy=%d\n", val);
  1194. }
  1195. } else {
  1196. mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
  1197. s->field_select[i][0] = get_bits1(&s->gb);
  1198. for(k=0;k<2;k++) {
  1199. val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
  1200. s->last_mv[i][0][k]);
  1201. s->last_mv[i][0][k] = val;
  1202. s->last_mv[i][1][k] = val;
  1203. s->mv[i][0][k] = val;
  1204. }
  1205. }
  1206. break;
  1207. case MT_DMV:
  1208. {
  1209. int dmx, dmy, mx, my, m;
  1210. mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
  1211. s->last_mv[i][0][0]);
  1212. s->last_mv[i][0][0] = mx;
  1213. s->last_mv[i][1][0] = mx;
  1214. dmx = get_dmv(s);
  1215. my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
  1216. s->last_mv[i][0][1] >> 1);
  1217. dmy = get_dmv(s);
  1218. s->mv_type = MV_TYPE_DMV;
  1219. s->last_mv[i][0][1] = my<<1;
  1220. s->last_mv[i][1][1] = my<<1;
  1221. s->mv[i][0][0] = mx;
  1222. s->mv[i][0][1] = my;
  1223. s->mv[i][1][0] = mx;//not used
  1224. s->mv[i][1][1] = my;//not used
  1225. if (s->picture_structure == PICT_FRAME) {
  1226. mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
  1227. //m = 1 + 2 * s->top_field_first;
  1228. m = s->top_field_first ? 1 : 3;
  1229. /* top -> top pred */
  1230. s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
  1231. s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
  1232. m = 4 - m;
  1233. s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
  1234. s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
  1235. } else {
  1236. mb_type |= MB_TYPE_16x16;
  1237. s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
  1238. s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
  1239. if(s->picture_structure == PICT_TOP_FIELD)
  1240. s->mv[i][2][1]--;
  1241. else
  1242. s->mv[i][2][1]++;
  1243. }
  1244. }
  1245. break;
  1246. default:
  1247. av_log(s->avctx, AV_LOG_ERROR, "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
  1248. return -1;
  1249. }
  1250. }
  1251. }
  1252. }
  1253. s->mb_intra = 0;
  1254. if (HAS_CBP(mb_type)) {
  1255. s->dsp.clear_blocks(s->block[0]);
  1256. if(!s->chroma_y_shift){
  1257. s->dsp.clear_blocks(s->block[6]);
  1258. }
  1259. cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
  1260. if (cbp < 0 || ((cbp == 0) && (s->chroma_format < 2)) ){
  1261. av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
  1262. return -1;
  1263. }
  1264. if(mb_block_count > 6){
  1265. cbp<<= mb_block_count-6;
  1266. cbp |= get_bits(&s->gb, mb_block_count-6);
  1267. }
  1268. #ifdef HAVE_XVMC
1269. // when xvmc_acceleration > 1 we memcpy blocks in xvmcvideo
  1270. if(s->avctx->xvmc_acceleration > 1){
  1271. XVMC_pack_pblocks(s,cbp);
  1272. if(s->swap_uv){
  1273. exchange_uv(s);
  1274. }
  1275. }
  1276. #endif
  1277. if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
  1278. if(s->flags2 & CODEC_FLAG2_FAST){
  1279. for(i=0;i<6;i++) {
  1280. if(cbp & 32) {
  1281. mpeg2_fast_decode_block_non_intra(s, s->pblocks[i], i);
  1282. } else {
  1283. s->block_last_index[i] = -1;
  1284. }
  1285. cbp+=cbp;
  1286. }
  1287. }else{
  1288. cbp<<= 12-mb_block_count;
  1289. for(i=0;i<mb_block_count;i++) {
  1290. if ( cbp & (1<<11) ) {
  1291. if (mpeg2_decode_block_non_intra(s, s->pblocks[i], i) < 0)
  1292. return -1;
  1293. } else {
  1294. s->block_last_index[i] = -1;
  1295. }
  1296. cbp+=cbp;
  1297. }
  1298. }
  1299. } else {
  1300. if(s->flags2 & CODEC_FLAG2_FAST){
  1301. for(i=0;i<6;i++) {
  1302. if (cbp & 32) {
  1303. mpeg1_fast_decode_block_inter(s, s->pblocks[i], i);
  1304. } else {
  1305. s->block_last_index[i] = -1;
  1306. }
  1307. cbp+=cbp;
  1308. }
  1309. }else{
  1310. for(i=0;i<6;i++) {
  1311. if (cbp & 32) {
  1312. if (mpeg1_decode_block_inter(s, s->pblocks[i], i) < 0)
  1313. return -1;
  1314. } else {
  1315. s->block_last_index[i] = -1;
  1316. }
  1317. cbp+=cbp;
  1318. }
  1319. }
  1320. }
  1321. }else{
  1322. for(i=0;i<12;i++)
  1323. s->block_last_index[i] = -1;
  1324. }
  1325. }
  1326. s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
  1327. return 0;
  1328. }
  1329. /* as h263, but only 17 codes */
  1330. static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
  1331. {
  1332. int code, sign, val, l, shift;
  1333. code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
  1334. if (code == 0) {
  1335. return pred;
  1336. }
  1337. if (code < 0) {
  1338. return 0xffff;
  1339. }
  1340. sign = get_bits1(&s->gb);
  1341. shift = fcode - 1;
  1342. val = code;
  1343. if (shift) {
  1344. val = (val - 1) << shift;
  1345. val |= get_bits(&s->gb, shift);
  1346. val++;
  1347. }
  1348. if (sign)
  1349. val = -val;
  1350. val += pred;
  1351. /* modulo decoding */
  1352. l= INT_BIT - 5 - shift;
  1353. val = (val<<l)>>l;
  1354. return val;
  1355. }
  1356. static inline int decode_dc(GetBitContext *gb, int component)
  1357. {
  1358. int code, diff;
  1359. if (component == 0) {
  1360. code = get_vlc2(gb, dc_lum_vlc.table, DC_VLC_BITS, 2);
  1361. } else {
  1362. code = get_vlc2(gb, dc_chroma_vlc.table, DC_VLC_BITS, 2);
  1363. }
  1364. if (code < 0){
  1365. av_log(NULL, AV_LOG_ERROR, "invalid dc code at\n");
  1366. return 0xffff;
  1367. }
  1368. if (code == 0) {
  1369. diff = 0;
  1370. } else {
  1371. diff = get_xbits(gb, code);
  1372. }
  1373. return diff;
  1374. }
  1375. static inline int mpeg1_decode_block_intra(MpegEncContext *s,
  1376. DCTELEM *block,
  1377. int n)
  1378. {
  1379. int level, dc, diff, i, j, run;
  1380. int component;
  1381. RLTable *rl = &rl_mpeg1;
  1382. uint8_t * const scantable= s->intra_scantable.permutated;
  1383. const uint16_t *quant_matrix= s->intra_matrix;
  1384. const int qscale= s->qscale;
  1385. /* DC coef */
  1386. component = (n <= 3 ? 0 : n - 4 + 1);
  1387. diff = decode_dc(&s->gb, component);
  1388. if (diff >= 0xffff)
  1389. return -1;
  1390. dc = s->last_dc[component];
  1391. dc += diff;
  1392. s->last_dc[component] = dc;
  1393. block[0] = dc<<3;
  1394. dprintf(s->avctx, "dc=%d diff=%d\n", dc, diff);
  1395. i = 0;
  1396. {
  1397. OPEN_READER(re, &s->gb);
1398. /* now decode & dequantize the AC coefficients */
  1399. for(;;) {
  1400. UPDATE_CACHE(re, &s->gb);
  1401. GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
  1402. if(level == 127){
  1403. break;
  1404. } else if(level != 0) {
  1405. i += run;
  1406. j = scantable[i];
  1407. level= (level*qscale*quant_matrix[j])>>4;
  1408. level= (level-1)|1;
  1409. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1410. LAST_SKIP_BITS(re, &s->gb, 1);
  1411. } else {
  1412. /* escape */
  1413. run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
  1414. UPDATE_CACHE(re, &s->gb);
  1415. level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
  1416. if (level == -128) {
  1417. level = SHOW_UBITS(re, &s->gb, 8) - 256; LAST_SKIP_BITS(re, &s->gb, 8);
  1418. } else if (level == 0) {
  1419. level = SHOW_UBITS(re, &s->gb, 8) ; LAST_SKIP_BITS(re, &s->gb, 8);
  1420. }
  1421. i += run;
  1422. j = scantable[i];
  1423. if(level<0){
  1424. level= -level;
  1425. level= (level*qscale*quant_matrix[j])>>4;
  1426. level= (level-1)|1;
  1427. level= -level;
  1428. }else{
  1429. level= (level*qscale*quant_matrix[j])>>4;
  1430. level= (level-1)|1;
  1431. }
  1432. }
  1433. if (i > 63){
  1434. av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
  1435. return -1;
  1436. }
  1437. block[j] = level;
  1438. }
  1439. CLOSE_READER(re, &s->gb);
  1440. }
  1441. s->block_last_index[n] = i;
  1442. return 0;
  1443. }
  1444. static inline int mpeg1_decode_block_inter(MpegEncContext *s,
  1445. DCTELEM *block,
  1446. int n)
  1447. {
  1448. int level, i, j, run;
  1449. RLTable *rl = &rl_mpeg1;
  1450. uint8_t * const scantable= s->intra_scantable.permutated;
  1451. const uint16_t *quant_matrix= s->inter_matrix;
  1452. const int qscale= s->qscale;
  1453. {
  1454. OPEN_READER(re, &s->gb);
  1455. i = -1;
  1456. /* special case for the first coef. no need to add a second vlc table */
  1457. UPDATE_CACHE(re, &s->gb);
  1458. if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
  1459. level= (3*qscale*quant_matrix[0])>>5;
  1460. level= (level-1)|1;
  1461. if(GET_CACHE(re, &s->gb)&0x40000000)
  1462. level= -level;
  1463. block[0] = level;
  1464. i++;
  1465. SKIP_BITS(re, &s->gb, 2);
  1466. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1467. goto end;
  1468. }
1469. /* now decode & dequantize the AC coefficients */
  1470. for(;;) {
  1471. GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
  1472. if(level != 0) {
  1473. i += run;
  1474. j = scantable[i];
  1475. level= ((level*2+1)*qscale*quant_matrix[j])>>5;
  1476. level= (level-1)|1;
  1477. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1478. SKIP_BITS(re, &s->gb, 1);
  1479. } else {
  1480. /* escape */
  1481. run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
  1482. UPDATE_CACHE(re, &s->gb);
  1483. level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
  1484. if (level == -128) {
  1485. level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
  1486. } else if (level == 0) {
  1487. level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8);
  1488. }
  1489. i += run;
  1490. j = scantable[i];
  1491. if(level<0){
  1492. level= -level;
  1493. level= ((level*2+1)*qscale*quant_matrix[j])>>5;
  1494. level= (level-1)|1;
  1495. level= -level;
  1496. }else{
  1497. level= ((level*2+1)*qscale*quant_matrix[j])>>5;
  1498. level= (level-1)|1;
  1499. }
  1500. }
  1501. if (i > 63){
  1502. av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
  1503. return -1;
  1504. }
  1505. block[j] = level;
  1506. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1507. break;
  1508. UPDATE_CACHE(re, &s->gb);
  1509. }
  1510. end:
  1511. LAST_SKIP_BITS(re, &s->gb, 2);
  1512. CLOSE_READER(re, &s->gb);
  1513. }
  1514. s->block_last_index[n] = i;
  1515. return 0;
  1516. }
  1517. static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n)
  1518. {
  1519. int level, i, j, run;
  1520. RLTable *rl = &rl_mpeg1;
  1521. uint8_t * const scantable= s->intra_scantable.permutated;
  1522. const int qscale= s->qscale;
  1523. {
  1524. OPEN_READER(re, &s->gb);
  1525. i = -1;
  1526. /* special case for the first coef. no need to add a second vlc table */
  1527. UPDATE_CACHE(re, &s->gb);
  1528. if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
  1529. level= (3*qscale)>>1;
  1530. level= (level-1)|1;
  1531. if(GET_CACHE(re, &s->gb)&0x40000000)
  1532. level= -level;
  1533. block[0] = level;
  1534. i++;
  1535. SKIP_BITS(re, &s->gb, 2);
  1536. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1537. goto end;
  1538. }
1539. /* now decode & dequantize the AC coefficients */
  1540. for(;;) {
  1541. GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
  1542. if(level != 0) {
  1543. i += run;
  1544. j = scantable[i];
  1545. level= ((level*2+1)*qscale)>>1;
  1546. level= (level-1)|1;
  1547. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1548. SKIP_BITS(re, &s->gb, 1);
  1549. } else {
  1550. /* escape */
  1551. run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
  1552. UPDATE_CACHE(re, &s->gb);
  1553. level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
  1554. if (level == -128) {
  1555. level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
  1556. } else if (level == 0) {
  1557. level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8);
  1558. }
  1559. i += run;
  1560. j = scantable[i];
  1561. if(level<0){
  1562. level= -level;
  1563. level= ((level*2+1)*qscale)>>1;
  1564. level= (level-1)|1;
  1565. level= -level;
  1566. }else{
  1567. level= ((level*2+1)*qscale)>>1;
  1568. level= (level-1)|1;
  1569. }
  1570. }
  1571. block[j] = level;
  1572. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1573. break;
  1574. UPDATE_CACHE(re, &s->gb);
  1575. }
  1576. end:
  1577. LAST_SKIP_BITS(re, &s->gb, 2);
  1578. CLOSE_READER(re, &s->gb);
  1579. }
  1580. s->block_last_index[n] = i;
  1581. return 0;
  1582. }
  1583. static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
  1584. DCTELEM *block,
  1585. int n)
  1586. {
  1587. int level, i, j, run;
  1588. RLTable *rl = &rl_mpeg1;
  1589. uint8_t * const scantable= s->intra_scantable.permutated;
  1590. const uint16_t *quant_matrix;
  1591. const int qscale= s->qscale;
  1592. int mismatch;
  1593. mismatch = 1;
  1594. {
  1595. OPEN_READER(re, &s->gb);
  1596. i = -1;
  1597. if (n < 4)
  1598. quant_matrix = s->inter_matrix;
  1599. else
  1600. quant_matrix = s->chroma_inter_matrix;
  1601. /* special case for the first coef. no need to add a second vlc table */
  1602. UPDATE_CACHE(re, &s->gb);
  1603. if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
  1604. level= (3*qscale*quant_matrix[0])>>5;
  1605. if(GET_CACHE(re, &s->gb)&0x40000000)
  1606. level= -level;
  1607. block[0] = level;
  1608. mismatch ^= level;
  1609. i++;
  1610. SKIP_BITS(re, &s->gb, 2);
  1611. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1612. goto end;
  1613. }
1614. /* now dequantize & decode the AC coefficients */
  1615. for(;;) {
  1616. GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
  1617. if(level != 0) {
  1618. i += run;
  1619. j = scantable[i];
  1620. level= ((level*2+1)*qscale*quant_matrix[j])>>5;
  1621. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1622. SKIP_BITS(re, &s->gb, 1);
  1623. } else {
  1624. /* escape */
  1625. run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
  1626. UPDATE_CACHE(re, &s->gb);
  1627. level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
  1628. i += run;
  1629. j = scantable[i];
  1630. if(level<0){
  1631. level= ((-level*2+1)*qscale*quant_matrix[j])>>5;
  1632. level= -level;
  1633. }else{
  1634. level= ((level*2+1)*qscale*quant_matrix[j])>>5;
  1635. }
  1636. }
  1637. if (i > 63){
  1638. av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
  1639. return -1;
  1640. }
  1641. mismatch ^= level;
  1642. block[j] = level;
  1643. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1644. break;
  1645. UPDATE_CACHE(re, &s->gb);
  1646. }
  1647. end:
  1648. LAST_SKIP_BITS(re, &s->gb, 2);
  1649. CLOSE_READER(re, &s->gb);
  1650. }
  1651. block[63] ^= (mismatch & 1);
  1652. s->block_last_index[n] = i;
  1653. return 0;
  1654. }
  1655. static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
  1656. DCTELEM *block,
  1657. int n)
  1658. {
  1659. int level, i, j, run;
  1660. RLTable *rl = &rl_mpeg1;
  1661. uint8_t * const scantable= s->intra_scantable.permutated;
  1662. const int qscale= s->qscale;
  1663. OPEN_READER(re, &s->gb);
  1664. i = -1;
  1665. /* special case for the first coef. no need to add a second vlc table */
  1666. UPDATE_CACHE(re, &s->gb);
  1667. if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
  1668. level= (3*qscale)>>1;
  1669. if(GET_CACHE(re, &s->gb)&0x40000000)
  1670. level= -level;
  1671. block[0] = level;
  1672. i++;
  1673. SKIP_BITS(re, &s->gb, 2);
  1674. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1675. goto end;
  1676. }
1677. /* now dequantize & decode the AC coefficients */
  1678. for(;;) {
  1679. GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
  1680. if(level != 0) {
  1681. i += run;
  1682. j = scantable[i];
  1683. level= ((level*2+1)*qscale)>>1;
  1684. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1685. SKIP_BITS(re, &s->gb, 1);
  1686. } else {
  1687. /* escape */
  1688. run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
  1689. UPDATE_CACHE(re, &s->gb);
  1690. level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
  1691. i += run;
  1692. j = scantable[i];
  1693. if(level<0){
  1694. level= ((-level*2+1)*qscale)>>1;
  1695. level= -level;
  1696. }else{
  1697. level= ((level*2+1)*qscale)>>1;
  1698. }
  1699. }
  1700. block[j] = level;
  1701. if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
  1702. break;
  1703. UPDATE_CACHE(re, &s->gb);
  1704. }
  1705. end:
  1706. LAST_SKIP_BITS(re, &s->gb, 2);
  1707. CLOSE_READER(re, &s->gb);
  1708. s->block_last_index[n] = i;
  1709. return 0;
  1710. }
  1711. static inline int mpeg2_decode_block_intra(MpegEncContext *s,
  1712. DCTELEM *block,
  1713. int n)
  1714. {
  1715. int level, dc, diff, i, j, run;
  1716. int component;
  1717. RLTable *rl;
  1718. uint8_t * const scantable= s->intra_scantable.permutated;
  1719. const uint16_t *quant_matrix;
  1720. const int qscale= s->qscale;
  1721. int mismatch;
  1722. /* DC coef */
  1723. if (n < 4){
  1724. quant_matrix = s->intra_matrix;
  1725. component = 0;
  1726. }else{
  1727. quant_matrix = s->chroma_intra_matrix;
  1728. component = (n&1) + 1;
  1729. }
  1730. diff = decode_dc(&s->gb, component);
  1731. if (diff >= 0xffff)
  1732. return -1;
  1733. dc = s->last_dc[component];
  1734. dc += diff;
  1735. s->last_dc[component] = dc;
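/* intra_dc_precision is 0..3 (8..11 bits), so the shift below scales the DC by 8, 4, 2 or 1 */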
  1736. block[0] = dc << (3 - s->intra_dc_precision);
  1737. dprintf(s->avctx, "dc=%d\n", block[0]);
  1738. mismatch = block[0] ^ 1;
  1739. i = 0;
  1740. if (s->intra_vlc_format)
  1741. rl = &rl_mpeg2;
  1742. else
  1743. rl = &rl_mpeg1;
  1744. {
  1745. OPEN_READER(re, &s->gb);
1746. /* now dequantize & decode the AC coefficients */
  1747. for(;;) {
  1748. UPDATE_CACHE(re, &s->gb);
  1749. GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
  1750. if(level == 127){
  1751. break;
  1752. } else if(level != 0) {
  1753. i += run;
  1754. j = scantable[i];
  1755. level= (level*qscale*quant_matrix[j])>>4;
  1756. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1757. LAST_SKIP_BITS(re, &s->gb, 1);
  1758. } else {
  1759. /* escape */
  1760. run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
  1761. UPDATE_CACHE(re, &s->gb);
  1762. level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
  1763. i += run;
  1764. j = scantable[i];
  1765. if(level<0){
  1766. level= (-level*qscale*quant_matrix[j])>>4;
  1767. level= -level;
  1768. }else{
  1769. level= (level*qscale*quant_matrix[j])>>4;
  1770. }
  1771. }
  1772. if (i > 63){
  1773. av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
  1774. return -1;
  1775. }
  1776. mismatch^= level;
  1777. block[j] = level;
  1778. }
  1779. CLOSE_READER(re, &s->gb);
  1780. }
  1781. block[63]^= mismatch&1;
  1782. s->block_last_index[n] = i;
  1783. return 0;
  1784. }
  1785. static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
  1786. DCTELEM *block,
  1787. int n)
  1788. {
  1789. int level, dc, diff, j, run;
  1790. int component;
  1791. RLTable *rl;
  1792. uint8_t * scantable= s->intra_scantable.permutated;
  1793. const uint16_t *quant_matrix;
  1794. const int qscale= s->qscale;
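/* fast variant: walks the scantable pointer directly and omits the i>63 bounds check
   and the mismatch control of mpeg2_decode_block_intra() */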
  1795. /* DC coef */
  1796. if (n < 4){
  1797. quant_matrix = s->intra_matrix;
  1798. component = 0;
  1799. }else{
  1800. quant_matrix = s->chroma_intra_matrix;
  1801. component = (n&1) + 1;
  1802. }
  1803. diff = decode_dc(&s->gb, component);
  1804. if (diff >= 0xffff)
  1805. return -1;
  1806. dc = s->last_dc[component];
  1807. dc += diff;
  1808. s->last_dc[component] = dc;
  1809. block[0] = dc << (3 - s->intra_dc_precision);
  1810. if (s->intra_vlc_format)
  1811. rl = &rl_mpeg2;
  1812. else
  1813. rl = &rl_mpeg1;
  1814. {
  1815. OPEN_READER(re, &s->gb);
1816. /* now dequantize & decode the AC coefficients */
  1817. for(;;) {
  1818. UPDATE_CACHE(re, &s->gb);
  1819. GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
  1820. if(level == 127){
  1821. break;
  1822. } else if(level != 0) {
  1823. scantable += run;
  1824. j = *scantable;
  1825. level= (level*qscale*quant_matrix[j])>>4;
  1826. level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
  1827. LAST_SKIP_BITS(re, &s->gb, 1);
  1828. } else {
  1829. /* escape */
  1830. run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
  1831. UPDATE_CACHE(re, &s->gb);
  1832. level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
  1833. scantable += run;
  1834. j = *scantable;
  1835. if(level<0){
  1836. level= (-level*qscale*quant_matrix[j])>>4;
  1837. level= -level;
  1838. }else{
  1839. level= (level*qscale*quant_matrix[j])>>4;
  1840. }
  1841. }
  1842. block[j] = level;
  1843. }
  1844. CLOSE_READER(re, &s->gb);
  1845. }
  1846. s->block_last_index[n] = scantable - s->intra_scantable.permutated;
  1847. return 0;
  1848. }
  1849. typedef struct Mpeg1Context {
  1850. MpegEncContext mpeg_enc_ctx;
  1851. int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
  1852. int repeat_field; /* true if we must repeat the field */
1853. AVPanScan pan_scan; /**< temporary storage for the pan scan info */
  1854. int slice_count;
1855. int swap_uv;//indicates a VCR2 stream
  1856. int save_aspect_info;
  1857. int save_width, save_height;
1858. AVRational frame_rate_ext; ///< MPEG-2 specific frame rate modifier
  1859. } Mpeg1Context;
  1860. static int mpeg_decode_init(AVCodecContext *avctx)
  1861. {
  1862. Mpeg1Context *s = avctx->priv_data;
  1863. MpegEncContext *s2 = &s->mpeg_enc_ctx;
  1864. int i;
1865. //we need some permutation to store the
1866. //matrices until MPV_common_init()
1867. //sets the real permutation
  1868. for(i=0;i<64;i++)
  1869. s2->dsp.idct_permutation[i]=i;
  1870. MPV_decode_defaults(s2);
  1871. s->mpeg_enc_ctx.avctx= avctx;
  1872. s->mpeg_enc_ctx.flags= avctx->flags;
  1873. s->mpeg_enc_ctx.flags2= avctx->flags2;
  1874. common_init(&s->mpeg_enc_ctx);
  1875. init_vlcs();
  1876. s->mpeg_enc_ctx_allocated = 0;
  1877. s->mpeg_enc_ctx.picture_number = 0;
  1878. s->repeat_field = 0;
  1879. s->mpeg_enc_ctx.codec_id= avctx->codec->id;
  1880. return 0;
  1881. }
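/* re-order a quantization matrix that was stored with old_perm so that it matches new_perm */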
  1882. static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
  1883. const uint8_t *new_perm){
  1884. uint16_t temp_matrix[64];
  1885. int i;
  1886. memcpy(temp_matrix,matrix,64*sizeof(uint16_t));
  1887. for(i=0;i<64;i++){
  1888. matrix[new_perm[i]] = temp_matrix[old_perm[i]];
  1889. }
  1890. }
1891. //Call this function once all parameters are known;
1892. //it may be called in different places for MPEG-1 and MPEG-2
  1893. static int mpeg_decode_postinit(AVCodecContext *avctx){
  1894. Mpeg1Context *s1 = avctx->priv_data;
  1895. MpegEncContext *s = &s1->mpeg_enc_ctx;
  1896. uint8_t old_permutation[64];
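/* (re)allocate the decoding context only if the sequence parameters changed since the last call */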
  1897. if (
  1898. (s1->mpeg_enc_ctx_allocated == 0)||
  1899. avctx->coded_width != s->width ||
  1900. avctx->coded_height != s->height||
  1901. s1->save_width != s->width ||
  1902. s1->save_height != s->height ||
  1903. s1->save_aspect_info != s->aspect_ratio_info||
  1904. 0)
  1905. {
  1906. if (s1->mpeg_enc_ctx_allocated) {
  1907. ParseContext pc= s->parse_context;
  1908. s->parse_context.buffer=0;
  1909. MPV_common_end(s);
  1910. s->parse_context= pc;
  1911. }
  1912. if( (s->width == 0 )||(s->height == 0))
  1913. return -2;
  1914. avcodec_set_dimensions(avctx, s->width, s->height);
  1915. avctx->bit_rate = s->bit_rate;
  1916. s1->save_aspect_info = s->aspect_ratio_info;
  1917. s1->save_width = s->width;
  1918. s1->save_height = s->height;
1919. //low_delay may be forced; in this case we will have B-frames
1920. //that behave like P-frames
  1921. avctx->has_b_frames = !(s->low_delay);
  1922. if(avctx->sub_id==1){//s->codec_id==avctx->codec_id==CODEC_ID
  1923. //mpeg1 fps
  1924. avctx->time_base.den= ff_frame_rate_tab[s->frame_rate_index].num;
  1925. avctx->time_base.num= ff_frame_rate_tab[s->frame_rate_index].den;
  1926. //mpeg1 aspect
  1927. avctx->sample_aspect_ratio= av_d2q(
  1928. 1.0/mpeg1_aspect[s->aspect_ratio_info], 255);
  1929. }else{//mpeg2
  1930. //mpeg2 fps
  1931. av_reduce(
  1932. &s->avctx->time_base.den,
  1933. &s->avctx->time_base.num,
  1934. ff_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num,
  1935. ff_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den,
  1936. 1<<30);
  1937. //mpeg2 aspect
  1938. if(s->aspect_ratio_info > 1){
  1939. if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0) ){
  1940. s->avctx->sample_aspect_ratio=
  1941. av_div_q(
  1942. mpeg2_aspect[s->aspect_ratio_info],
  1943. (AVRational){s->width, s->height}
  1944. );
  1945. }else{
  1946. s->avctx->sample_aspect_ratio=
  1947. av_div_q(
  1948. mpeg2_aspect[s->aspect_ratio_info],
  1949. (AVRational){s1->pan_scan.width, s1->pan_scan.height}
  1950. );
  1951. }
  1952. }else{
  1953. s->avctx->sample_aspect_ratio=
  1954. mpeg2_aspect[s->aspect_ratio_info];
  1955. }
  1956. }//mpeg2
  1957. if(avctx->xvmc_acceleration){
  1958. avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420);
  1959. }else{
  1960. if(s->chroma_format < 2){
  1961. avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420);
  1962. }else
  1963. if(s->chroma_format == 2){
  1964. avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_422);
  1965. }else
  1966. if(s->chroma_format > 2){
  1967. avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_444);
  1968. }
  1969. }
  1970. //until then pix_fmt may be changed right after codec init
  1971. if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT )
  1972. if( avctx->idct_algo == FF_IDCT_AUTO )
  1973. avctx->idct_algo = FF_IDCT_SIMPLE;
  1974. //quantization matrixes may need reordering
  1975. //if dct permutation is changed
  1976. memcpy(old_permutation,s->dsp.idct_permutation,64*sizeof(uint8_t));
  1977. if (MPV_common_init(s) < 0)
  1978. return -2;
  1979. quant_matrix_rebuild(s->intra_matrix, old_permutation,s->dsp.idct_permutation);
  1980. quant_matrix_rebuild(s->inter_matrix, old_permutation,s->dsp.idct_permutation);
  1981. quant_matrix_rebuild(s->chroma_intra_matrix,old_permutation,s->dsp.idct_permutation);
  1982. quant_matrix_rebuild(s->chroma_inter_matrix,old_permutation,s->dsp.idct_permutation);
  1983. s1->mpeg_enc_ctx_allocated = 1;
  1984. }
  1985. return 0;
  1986. }
  1987. static int mpeg1_decode_picture(AVCodecContext *avctx,
  1988. const uint8_t *buf, int buf_size)
  1989. {
  1990. Mpeg1Context *s1 = avctx->priv_data;
  1991. MpegEncContext *s = &s1->mpeg_enc_ctx;
  1992. int ref, f_code, vbv_delay;
  1993. if(mpeg_decode_postinit(s->avctx) < 0)
  1994. return -2;
  1995. init_get_bits(&s->gb, buf, buf_size*8);
  1996. ref = get_bits(&s->gb, 10); /* temporal ref */
  1997. s->pict_type = get_bits(&s->gb, 3);
  1998. if(s->pict_type == 0 || s->pict_type > 3)
  1999. return -1;
  2000. vbv_delay= get_bits(&s->gb, 16);
  2001. if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
  2002. s->full_pel[0] = get_bits1(&s->gb);
  2003. f_code = get_bits(&s->gb, 3);
  2004. if (f_code == 0 && avctx->error_resilience >= FF_ER_COMPLIANT)
  2005. return -1;
  2006. s->mpeg_f_code[0][0] = f_code;
  2007. s->mpeg_f_code[0][1] = f_code;
  2008. }
  2009. if (s->pict_type == B_TYPE) {
  2010. s->full_pel[1] = get_bits1(&s->gb);
  2011. f_code = get_bits(&s->gb, 3);
  2012. if (f_code == 0 && avctx->error_resilience >= FF_ER_COMPLIANT)
  2013. return -1;
  2014. s->mpeg_f_code[1][0] = f_code;
  2015. s->mpeg_f_code[1][1] = f_code;
  2016. }
  2017. s->current_picture.pict_type= s->pict_type;
  2018. s->current_picture.key_frame= s->pict_type == I_TYPE;
  2019. if(avctx->debug & FF_DEBUG_PICT_INFO)
  2020. av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
  2021. s->y_dc_scale = 8;
  2022. s->c_dc_scale = 8;
  2023. s->first_slice = 1;
  2024. return 0;
  2025. }
  2026. static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
  2027. {
  2028. MpegEncContext *s= &s1->mpeg_enc_ctx;
  2029. int horiz_size_ext, vert_size_ext;
  2030. int bit_rate_ext;
2031. skip_bits(&s->gb, 1); /* profile and level escape bit */
  2032. s->avctx->profile= get_bits(&s->gb, 3);
  2033. s->avctx->level= get_bits(&s->gb, 4);
  2034. s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
  2035. s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
  2036. horiz_size_ext = get_bits(&s->gb, 2);
  2037. vert_size_ext = get_bits(&s->gb, 2);
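/* the 2-bit size extensions supply bits 12-13 of the 12-bit sequence header dimensions */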
  2038. s->width |= (horiz_size_ext << 12);
  2039. s->height |= (vert_size_ext << 12);
  2040. bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
  2041. s->bit_rate += (bit_rate_ext << 18) * 400;
  2042. skip_bits1(&s->gb); /* marker */
  2043. s->avctx->rc_buffer_size += get_bits(&s->gb, 8)*1024*16<<10;
  2044. s->low_delay = get_bits1(&s->gb);
  2045. if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;
  2046. s1->frame_rate_ext.num = get_bits(&s->gb, 2)+1;
  2047. s1->frame_rate_ext.den = get_bits(&s->gb, 5)+1;
  2048. dprintf(s->avctx, "sequence extension\n");
  2049. s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO;
  2050. s->avctx->sub_id = 2; /* indicates mpeg2 found */
  2051. if(s->avctx->debug & FF_DEBUG_PICT_INFO)
  2052. av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n",
  2053. s->avctx->profile, s->avctx->level, s->avctx->rc_buffer_size, s->bit_rate);
  2054. }
  2055. static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
  2056. {
  2057. MpegEncContext *s= &s1->mpeg_enc_ctx;
  2058. int color_description, w, h;
  2059. skip_bits(&s->gb, 3); /* video format */
  2060. color_description= get_bits1(&s->gb);
  2061. if(color_description){
  2062. skip_bits(&s->gb, 8); /* color primaries */
  2063. skip_bits(&s->gb, 8); /* transfer_characteristics */
  2064. skip_bits(&s->gb, 8); /* matrix_coefficients */
  2065. }
  2066. w= get_bits(&s->gb, 14);
  2067. skip_bits(&s->gb, 1); //marker
  2068. h= get_bits(&s->gb, 14);
  2069. skip_bits(&s->gb, 1); //marker
  2070. s1->pan_scan.width= 16*w;
  2071. s1->pan_scan.height=16*h;
  2072. if(s->avctx->debug & FF_DEBUG_PICT_INFO)
  2073. av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
  2074. }
  2075. static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
  2076. {
  2077. MpegEncContext *s= &s1->mpeg_enc_ctx;
  2078. int i,nofco;
  2079. nofco = 1;
  2080. if(s->progressive_sequence){
  2081. if(s->repeat_first_field){
  2082. nofco++;
  2083. if(s->top_field_first)
  2084. nofco++;
  2085. }
  2086. }else{
  2087. if(s->picture_structure == PICT_FRAME){
  2088. nofco++;
  2089. if(s->repeat_first_field)
  2090. nofco++;
  2091. }
  2092. }
  2093. for(i=0; i<nofco; i++){
  2094. s1->pan_scan.position[i][0]= get_sbits(&s->gb, 16);
  2095. skip_bits(&s->gb, 1); //marker
  2096. s1->pan_scan.position[i][1]= get_sbits(&s->gb, 16);
  2097. skip_bits(&s->gb, 1); //marker
  2098. }
  2099. if(s->avctx->debug & FF_DEBUG_PICT_INFO)
  2100. av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n",
  2101. s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
  2102. s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
  2103. s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]
  2104. );
  2105. }
  2106. static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
  2107. {
  2108. int i, v, j;
  2109. dprintf(s->avctx, "matrix extension\n");
  2110. if (get_bits1(&s->gb)) {
  2111. for(i=0;i<64;i++) {
  2112. v = get_bits(&s->gb, 8);
  2113. j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
  2114. s->intra_matrix[j] = v;
  2115. s->chroma_intra_matrix[j] = v;
  2116. }
  2117. }
  2118. if (get_bits1(&s->gb)) {
  2119. for(i=0;i<64;i++) {
  2120. v = get_bits(&s->gb, 8);
  2121. j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
  2122. s->inter_matrix[j] = v;
  2123. s->chroma_inter_matrix[j] = v;
  2124. }
  2125. }
  2126. if (get_bits1(&s->gb)) {
  2127. for(i=0;i<64;i++) {
  2128. v = get_bits(&s->gb, 8);
  2129. j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
  2130. s->chroma_intra_matrix[j] = v;
  2131. }
  2132. }
  2133. if (get_bits1(&s->gb)) {
  2134. for(i=0;i<64;i++) {
  2135. v = get_bits(&s->gb, 8);
  2136. j= s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
  2137. s->chroma_inter_matrix[j] = v;
  2138. }
  2139. }
  2140. }
  2141. static void mpeg_decode_picture_coding_extension(MpegEncContext *s)
  2142. {
  2143. s->full_pel[0] = s->full_pel[1] = 0;
  2144. s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
  2145. s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
  2146. s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
  2147. s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
  2148. s->intra_dc_precision = get_bits(&s->gb, 2);
  2149. s->picture_structure = get_bits(&s->gb, 2);
  2150. s->top_field_first = get_bits1(&s->gb);
  2151. s->frame_pred_frame_dct = get_bits1(&s->gb);
  2152. s->concealment_motion_vectors = get_bits1(&s->gb);
  2153. s->q_scale_type = get_bits1(&s->gb);
  2154. s->intra_vlc_format = get_bits1(&s->gb);
  2155. s->alternate_scan = get_bits1(&s->gb);
  2156. s->repeat_first_field = get_bits1(&s->gb);
  2157. s->chroma_420_type = get_bits1(&s->gb);
  2158. s->progressive_frame = get_bits1(&s->gb);
  2159. if(s->picture_structure == PICT_FRAME){
  2160. s->first_field=0;
  2161. s->v_edge_pos= 16*s->mb_height;
  2162. }else{
  2163. s->first_field ^= 1;
  2164. s->v_edge_pos= 8*s->mb_height;
  2165. memset(s->mbskip_table, 0, s->mb_stride*s->mb_height);
  2166. }
  2167. if(s->alternate_scan){
  2168. ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
  2169. ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
  2170. }else{
  2171. ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
  2172. ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
  2173. }
  2174. /* composite display not parsed */
  2175. dprintf(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
  2176. dprintf(s->avctx, "picture_structure=%d\n", s->picture_structure);
  2177. dprintf(s->avctx, "top field first=%d\n", s->top_field_first);
  2178. dprintf(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
  2179. dprintf(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
  2180. dprintf(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
  2181. dprintf(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
  2182. dprintf(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
  2183. dprintf(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
  2184. }
  2185. static void mpeg_decode_extension(AVCodecContext *avctx,
  2186. const uint8_t *buf, int buf_size)
  2187. {
  2188. Mpeg1Context *s1 = avctx->priv_data;
  2189. MpegEncContext *s = &s1->mpeg_enc_ctx;
  2190. int ext_type;
  2191. init_get_bits(&s->gb, buf, buf_size*8);
  2192. ext_type = get_bits(&s->gb, 4);
  2193. switch(ext_type) {
  2194. case 0x1:
  2195. mpeg_decode_sequence_extension(s1);
  2196. break;
  2197. case 0x2:
  2198. mpeg_decode_sequence_display_extension(s1);
  2199. break;
  2200. case 0x3:
  2201. mpeg_decode_quant_matrix_extension(s);
  2202. break;
  2203. case 0x7:
  2204. mpeg_decode_picture_display_extension(s1);
  2205. break;
  2206. case 0x8:
  2207. mpeg_decode_picture_coding_extension(s);
  2208. break;
  2209. }
  2210. }
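/* swap the Cb and Cr pseudo-block pointers; used for VCR2 streams, which carry the chroma blocks swapped */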
  2211. static void exchange_uv(MpegEncContext *s){
  2212. short * tmp = s->pblocks[4];
  2213. s->pblocks[4] = s->pblocks[5];
  2214. s->pblocks[5] = tmp;
  2215. }
  2216. static int mpeg_field_start(MpegEncContext *s){
  2217. AVCodecContext *avctx= s->avctx;
  2218. Mpeg1Context *s1 = (Mpeg1Context*)s;
  2219. /* start frame decoding */
  2220. if(s->first_field || s->picture_structure==PICT_FRAME){
  2221. if(MPV_frame_start(s, avctx) < 0)
  2222. return -1;
  2223. ff_er_frame_start(s);
  2224. /* first check if we must repeat the frame */
  2225. s->current_picture_ptr->repeat_pict = 0;
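/* repeat_pict counts extra fields to display: 4 or 2 for frame tripling/doubling in
   progressive sequences, 1 for a repeated first field of a progressive frame */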
  2226. if (s->repeat_first_field) {
  2227. if (s->progressive_sequence) {
  2228. if (s->top_field_first)
  2229. s->current_picture_ptr->repeat_pict = 4;
  2230. else
  2231. s->current_picture_ptr->repeat_pict = 2;
  2232. } else if (s->progressive_frame) {
  2233. s->current_picture_ptr->repeat_pict = 1;
  2234. }
  2235. }
  2236. *s->current_picture_ptr->pan_scan= s1->pan_scan;
  2237. }else{ //second field
  2238. int i;
  2239. if(!s->current_picture_ptr){
  2240. av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
  2241. return -1;
  2242. }
  2243. for(i=0; i<4; i++){
  2244. s->current_picture.data[i] = s->current_picture_ptr->data[i];
  2245. if(s->picture_structure == PICT_BOTTOM_FIELD){
  2246. s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
  2247. }
  2248. }
  2249. }
  2250. #ifdef HAVE_XVMC
  2251. // MPV_frame_start will call this function too,
  2252. // but we need to call it on every field
  2253. if(s->avctx->xvmc_acceleration)
  2254. XVMC_field_start(s,avctx);
  2255. #endif
  2256. return 0;
  2257. }
  2258. #define DECODE_SLICE_ERROR -1
  2259. #define DECODE_SLICE_OK 0
  2260. /**
  2261. * decodes a slice. MpegEncContext.mb_y must be set to the MB row from the startcode
  2262. * @return DECODE_SLICE_ERROR if the slice is damaged<br>
  2263. * DECODE_SLICE_OK if this slice is ok<br>
  2264. */
  2265. static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
  2266. const uint8_t **buf, int buf_size)
  2267. {
  2268. MpegEncContext *s = &s1->mpeg_enc_ctx;
  2269. AVCodecContext *avctx= s->avctx;
  2270. int ret;
  2271. const int field_pic= s->picture_structure != PICT_FRAME;
  2272. const int lowres= s->avctx->lowres;
  2273. s->resync_mb_x=
  2274. s->resync_mb_y= -1;
  2275. if (mb_y<<field_pic >= s->mb_height){
  2276. av_log(s->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", mb_y, s->mb_height);
  2277. return -1;
  2278. }
  2279. init_get_bits(&s->gb, *buf, buf_size*8);
  2280. ff_mpeg1_clean_buffers(s);
  2281. s->interlaced_dct = 0;
  2282. s->qscale = get_qscale(s);
  2283. if(s->qscale == 0){
  2284. av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
  2285. return -1;
  2286. }
  2287. /* extra slice info */
  2288. while (get_bits1(&s->gb) != 0) {
  2289. skip_bits(&s->gb, 8);
  2290. }
  2291. s->mb_x=0;
  2292. for(;;) {
  2293. int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
  2294. if (code < 0){
  2295. av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
  2296. return -1;
  2297. }
  2298. if (code >= 33) {
  2299. if (code == 33) {
  2300. s->mb_x += 33;
  2301. }
  2302. /* otherwise, stuffing, nothing to do */
  2303. } else {
  2304. s->mb_x += code;
  2305. break;
  2306. }
  2307. }
  2308. if(s->mb_x >= (unsigned)s->mb_width){
  2309. av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
  2310. return -1;
  2311. }
  2312. s->resync_mb_x= s->mb_x;
  2313. s->resync_mb_y= s->mb_y= mb_y;
  2314. s->mb_skip_run= 0;
  2315. ff_init_block_index(s);
  2316. if (s->mb_y==0 && s->mb_x==0 && (s->first_field || s->picture_structure==PICT_FRAME)) {
  2317. if(s->avctx->debug&FF_DEBUG_PICT_INFO){
  2318. av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
  2319. s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
  2320. s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? "B" : "S")),
  2321. s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
  2322. s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
  2323. s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
  2324. }
  2325. }
  2326. for(;;) {
  2327. #ifdef HAVE_XVMC
2328. //with xvmc_acceleration > 1 we memcpy the blocks in xvmcvideo
  2329. if(s->avctx->xvmc_acceleration > 1)
  2330. XVMC_init_block(s);//set s->block
  2331. #endif
  2332. ret = mpeg_decode_mb(s, s->block);
  2333. s->chroma_qscale= s->qscale;
  2334. dprintf(s->avctx, "ret=%d\n", ret);
  2335. if (ret < 0)
  2336. return -1;
  2337. if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
  2338. const int wrap = field_pic ? 2*s->b8_stride : s->b8_stride;
  2339. int xy = s->mb_x*2 + s->mb_y*2*wrap;
  2340. int motion_x, motion_y, dir, i;
  2341. if(field_pic && !s->first_field)
  2342. xy += wrap/2;
  2343. for(i=0; i<2; i++){
  2344. for(dir=0; dir<2; dir++){
  2345. if (s->mb_intra || (dir==1 && s->pict_type != B_TYPE)) {
  2346. motion_x = motion_y = 0;
  2347. }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
  2348. motion_x = s->mv[dir][0][0];
  2349. motion_y = s->mv[dir][0][1];
  2350. } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ {
  2351. motion_x = s->mv[dir][i][0];
  2352. motion_y = s->mv[dir][i][1];
  2353. }
  2354. s->current_picture.motion_val[dir][xy ][0] = motion_x;
  2355. s->current_picture.motion_val[dir][xy ][1] = motion_y;
  2356. s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
  2357. s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
  2358. s->current_picture.ref_index [dir][xy ]=
  2359. s->current_picture.ref_index [dir][xy + 1]= s->field_select[dir][i];
  2360. assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
  2361. }
  2362. xy += wrap;
  2363. }
  2364. }
  2365. s->dest[0] += 16 >> lowres;
  2366. s->dest[1] += 16 >> (s->chroma_x_shift + lowres);
  2367. s->dest[2] += 16 >> (s->chroma_x_shift + lowres);
  2368. MPV_decode_mb(s, s->block);
  2369. if (++s->mb_x >= s->mb_width) {
  2370. const int mb_size= 16>>s->avctx->lowres;
  2371. ff_draw_horiz_band(s, mb_size*s->mb_y, mb_size);
  2372. s->mb_x = 0;
  2373. s->mb_y++;
  2374. if(s->mb_y<<field_pic >= s->mb_height){
  2375. int left= s->gb.size_in_bits - get_bits_count(&s->gb);
  2376. int is_d10= s->chroma_format==2 && s->pict_type==I_TYPE && avctx->profile==0 && avctx->level==5
  2377. && s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
  2378. && s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;
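/* D-10 (IMX) streams can carry padding after the last slice, so leftover bits are not treated as an error for them */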
  2379. if(left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10)
  2380. || (avctx->error_resilience >= FF_ER_AGGRESSIVE && left>8)){
  2381. av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n", left, show_bits(&s->gb, FFMIN(left, 23)));
  2382. return -1;
  2383. }else
  2384. goto eos;
  2385. }
  2386. ff_init_block_index(s);
  2387. }
  2388. /* skip mb handling */
  2389. if (s->mb_skip_run == -1) {
  2390. /* read again increment */
  2391. s->mb_skip_run = 0;
  2392. for(;;) {
  2393. int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
  2394. if (code < 0){
  2395. av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
  2396. return -1;
  2397. }
  2398. if (code >= 33) {
  2399. if (code == 33) {
  2400. s->mb_skip_run += 33;
  2401. }else if(code == 35){
  2402. if(s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0){
  2403. av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
  2404. return -1;
  2405. }
  2406. goto eos; /* end of slice */
  2407. }
  2408. /* otherwise, stuffing, nothing to do */
  2409. } else {
  2410. s->mb_skip_run += code;
  2411. break;
  2412. }
  2413. }
  2414. }
  2415. }
  2416. eos: // end of slice
  2417. *buf += get_bits_count(&s->gb)/8 - 1;
  2418. //printf("y %d %d %d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
  2419. return 0;
  2420. }
  2421. static int slice_decode_thread(AVCodecContext *c, void *arg){
  2422. MpegEncContext *s= arg;
  2423. const uint8_t *buf= s->gb.buffer;
  2424. int mb_y= s->start_mb_y;
  2425. s->error_count= 3*(s->end_mb_y - s->start_mb_y)*s->mb_width;
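/* decode every slice between start_mb_y and end_mb_y, resynchronizing on the next slice start code after each one */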
  2426. for(;;){
  2427. uint32_t start_code;
  2428. int ret;
  2429. ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf);
  2430. emms_c();
  2431. //av_log(c, AV_LOG_DEBUG, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
  2432. //ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, s->start_mb_y, s->end_mb_y, s->error_count);
  2433. if(ret < 0){
  2434. if(s->resync_mb_x>=0 && s->resync_mb_y>=0)
  2435. ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
  2436. }else{
  2437. ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
  2438. }
  2439. if(s->mb_y == s->end_mb_y)
  2440. return 0;
  2441. start_code= -1;
  2442. buf = ff_find_start_code(buf, s->gb.buffer_end, &start_code);
  2443. mb_y= start_code - SLICE_MIN_START_CODE;
  2444. if(mb_y < 0 || mb_y >= s->end_mb_y)
  2445. return -1;
  2446. }
  2447. return 0; //not reached
  2448. }
  2449. /**
  2450. * handles slice ends.
2451. * @return 1 if it seems to be the last slice of the frame
  2452. */
  2453. static int slice_end(AVCodecContext *avctx, AVFrame *pict)
  2454. {
  2455. Mpeg1Context *s1 = avctx->priv_data;
  2456. MpegEncContext *s = &s1->mpeg_enc_ctx;
  2457. if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
  2458. return 0;
  2459. #ifdef HAVE_XVMC
  2460. if(s->avctx->xvmc_acceleration)
  2461. XVMC_field_end(s);
  2462. #endif
  2463. /* end of slice reached */
  2464. if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
  2465. /* end of image */
  2466. s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
  2467. ff_er_frame_end(s);
  2468. MPV_frame_end(s);
  2469. if (s->pict_type == B_TYPE || s->low_delay) {
  2470. *pict= *(AVFrame*)s->current_picture_ptr;
  2471. ff_print_debug_info(s, pict);
  2472. } else {
  2473. s->picture_number++;
  2474. /* latency of 1 frame for I and P frames */
  2475. /* XXX: use another variable than picture_number */
  2476. if (s->last_picture_ptr != NULL) {
  2477. *pict= *(AVFrame*)s->last_picture_ptr;
  2478. ff_print_debug_info(s, pict);
  2479. }
  2480. }
  2481. return 1;
  2482. } else {
  2483. return 0;
  2484. }
  2485. }
  2486. static int mpeg1_decode_sequence(AVCodecContext *avctx,
  2487. const uint8_t *buf, int buf_size)
  2488. {
  2489. Mpeg1Context *s1 = avctx->priv_data;
  2490. MpegEncContext *s = &s1->mpeg_enc_ctx;
  2491. int width,height;
  2492. int i, v, j;
  2493. init_get_bits(&s->gb, buf, buf_size*8);
  2494. width = get_bits(&s->gb, 12);
  2495. height = get_bits(&s->gb, 12);
  2496. if (width <= 0 || height <= 0 ||
  2497. (width % 2) != 0 || (height % 2) != 0)
  2498. return -1;
  2499. s->aspect_ratio_info= get_bits(&s->gb, 4);
  2500. if (s->aspect_ratio_info == 0)
  2501. return -1;
  2502. s->frame_rate_index = get_bits(&s->gb, 4);
  2503. if (s->frame_rate_index == 0 || s->frame_rate_index > 13)
  2504. return -1;
  2505. s->bit_rate = get_bits(&s->gb, 18) * 400;
  2506. if (get_bits1(&s->gb) == 0) /* marker */
  2507. return -1;
  2508. s->width = width;
  2509. s->height = height;
  2510. s->avctx->rc_buffer_size= get_bits(&s->gb, 10) * 1024*16;
  2511. skip_bits(&s->gb, 1);
  2512. /* get matrix */
  2513. if (get_bits1(&s->gb)) {
  2514. for(i=0;i<64;i++) {
  2515. v = get_bits(&s->gb, 8);
  2516. if(v==0){
  2517. av_log(s->avctx, AV_LOG_ERROR, "intra matrix damaged\n");
  2518. return -1;
  2519. }
  2520. j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
  2521. s->intra_matrix[j] = v;
  2522. s->chroma_intra_matrix[j] = v;
  2523. }
  2524. #ifdef DEBUG
  2525. dprintf(s->avctx, "intra matrix present\n");
  2526. for(i=0;i<64;i++)
  2527. dprintf(s->avctx, " %d", s->intra_matrix[s->dsp.idct_permutation[i]]);
  2528. dprintf(s->avctx, "\n");
  2529. #endif
  2530. } else {
  2531. for(i=0;i<64;i++) {
  2532. j = s->dsp.idct_permutation[i];
  2533. v = ff_mpeg1_default_intra_matrix[i];
  2534. s->intra_matrix[j] = v;
  2535. s->chroma_intra_matrix[j] = v;
  2536. }
  2537. }
  2538. if (get_bits1(&s->gb)) {
  2539. for(i=0;i<64;i++) {
  2540. v = get_bits(&s->gb, 8);
  2541. if(v==0){
  2542. av_log(s->avctx, AV_LOG_ERROR, "inter matrix damaged\n");
  2543. return -1;
  2544. }
  2545. j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
  2546. s->inter_matrix[j] = v;
  2547. s->chroma_inter_matrix[j] = v;
  2548. }
  2549. #ifdef DEBUG
  2550. dprintf(s->avctx, "non intra matrix present\n");
  2551. for(i=0;i<64;i++)
  2552. dprintf(s->avctx, " %d", s->inter_matrix[s->dsp.idct_permutation[i]]);
  2553. dprintf(s->avctx, "\n");
  2554. #endif
  2555. } else {
  2556. for(i=0;i<64;i++) {
  2557. int j= s->dsp.idct_permutation[i];
  2558. v = ff_mpeg1_default_non_intra_matrix[i];
  2559. s->inter_matrix[j] = v;
  2560. s->chroma_inter_matrix[j] = v;
  2561. }
  2562. }
  2563. if(show_bits(&s->gb, 23) != 0){
  2564. av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
  2565. return -1;
  2566. }
2567. /* we set MPEG-2 parameters so that MPEG-1 behaviour is emulated */
  2568. s->progressive_sequence = 1;
  2569. s->progressive_frame = 1;
  2570. s->picture_structure = PICT_FRAME;
  2571. s->frame_pred_frame_dct = 1;
  2572. s->chroma_format = 1;
  2573. s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG1VIDEO;
  2574. avctx->sub_id = 1; /* indicates mpeg1 */
  2575. s->out_format = FMT_MPEG1;
2576. s->swap_uv = 0;//AFAIK VCR2 streams don't have a SEQ_HEADER
  2577. if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;
  2578. if(s->avctx->debug & FF_DEBUG_PICT_INFO)
  2579. av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n",
  2580. s->avctx->rc_buffer_size, s->bit_rate);
  2581. return 0;
  2582. }
  2583. static int vcr2_init_sequence(AVCodecContext *avctx)
  2584. {
  2585. Mpeg1Context *s1 = avctx->priv_data;
  2586. MpegEncContext *s = &s1->mpeg_enc_ctx;
  2587. int i, v;
2588. /* start a fresh mpeg1 decoding context */
  2589. s->out_format = FMT_MPEG1;
  2590. if (s1->mpeg_enc_ctx_allocated) {
  2591. MPV_common_end(s);
  2592. }
  2593. s->width = avctx->coded_width;
  2594. s->height = avctx->coded_height;
  2595. avctx->has_b_frames= 0; //true?
  2596. s->low_delay= 1;
  2597. if(avctx->xvmc_acceleration){
  2598. avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420);
  2599. }else{
  2600. avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420);
  2601. }
  2602. if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT )
  2603. if( avctx->idct_algo == FF_IDCT_AUTO )
  2604. avctx->idct_algo = FF_IDCT_SIMPLE;
  2605. if (MPV_common_init(s) < 0)
  2606. return -1;
2607. exchange_uv(s);//MPV_common_init() resets pblocks, so we swap them here
  2608. s->swap_uv = 1;// in case of xvmc we need to swap uv for each MB
  2609. s1->mpeg_enc_ctx_allocated = 1;
  2610. for(i=0;i<64;i++) {
  2611. int j= s->dsp.idct_permutation[i];
  2612. v = ff_mpeg1_default_intra_matrix[i];
  2613. s->intra_matrix[j] = v;
  2614. s->chroma_intra_matrix[j] = v;
  2615. v = ff_mpeg1_default_non_intra_matrix[i];
  2616. s->inter_matrix[j] = v;
  2617. s->chroma_inter_matrix[j] = v;
  2618. }
  2619. s->progressive_sequence = 1;
  2620. s->progressive_frame = 1;
  2621. s->picture_structure = PICT_FRAME;
  2622. s->frame_pred_frame_dct = 1;
  2623. s->chroma_format = 1;
  2624. s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO;
  2625. avctx->sub_id = 2; /* indicates mpeg2 */
  2626. return 0;
  2627. }
  2628. static void mpeg_decode_user_data(AVCodecContext *avctx,
  2629. const uint8_t *buf, int buf_size)
  2630. {
  2631. const uint8_t *p;
  2632. int len, flags;
  2633. p = buf;
  2634. len = buf_size;
  2635. /* we parse the DTG active format information */
  2636. if (len >= 5 &&
  2637. p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
  2638. flags = p[4];
  2639. p += 5;
  2640. len -= 5;
  2641. if (flags & 0x80) {
  2642. /* skip event id */
  2643. if (len < 2)
  2644. return;
  2645. p += 2;
  2646. len -= 2;
  2647. }
  2648. if (flags & 0x40) {
  2649. if (len < 1)
  2650. return;
  2651. avctx->dtg_active_format = p[0] & 0x0f;
  2652. }
  2653. }
  2654. }
  2655. static void mpeg_decode_gop(AVCodecContext *avctx,
  2656. const uint8_t *buf, int buf_size){
  2657. Mpeg1Context *s1 = avctx->priv_data;
  2658. MpegEncContext *s = &s1->mpeg_enc_ctx;
  2659. int drop_frame_flag;
  2660. int time_code_hours, time_code_minutes;
  2661. int time_code_seconds, time_code_pictures;
  2662. int broken_link;
  2663. init_get_bits(&s->gb, buf, buf_size*8);
  2664. drop_frame_flag = get_bits1(&s->gb);
  2665. time_code_hours=get_bits(&s->gb,5);
  2666. time_code_minutes = get_bits(&s->gb,6);
  2667. skip_bits1(&s->gb);//marker bit
  2668. time_code_seconds = get_bits(&s->gb,6);
  2669. time_code_pictures = get_bits(&s->gb,6);
2670. /*broken_link indicates that, after editing, the
2671. reference frames of the first B-frames following the GOP I-frame
2672. are missing (open GOP)*/
  2673. broken_link = get_bits1(&s->gb);
  2674. if(s->avctx->debug & FF_DEBUG_PICT_INFO)
  2675. av_log(s->avctx, AV_LOG_DEBUG, "GOP (%2d:%02d:%02d.[%02d]) broken_link=%d\n",
  2676. time_code_hours, time_code_minutes, time_code_seconds,
  2677. time_code_pictures, broken_link);
  2678. }
  2679. /**
  2680. * finds the end of the current frame in the bitstream.
  2681. * @return the position of the first byte of the next frame, or -1
  2682. */
  2683. int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size)
  2684. {
  2685. int i;
  2686. uint32_t state= pc->state;
  2687. i=0;
  2688. if(!pc->frame_start_found){
  2689. for(i=0; i<buf_size; i++){
  2690. i= ff_find_start_code(buf+i, buf+buf_size, &state) - buf - 1;
  2691. if(state >= SLICE_MIN_START_CODE && state <= SLICE_MAX_START_CODE){
  2692. i++;
  2693. pc->frame_start_found=1;
  2694. break;
  2695. }
  2696. }
  2697. }
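/* once a slice has been seen, the frame ends at the first following non-slice start code */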
  2698. if(pc->frame_start_found){
  2699. /* EOF considered as end of frame */
  2700. if (buf_size == 0)
  2701. return 0;
  2702. for(; i<buf_size; i++){
  2703. i= ff_find_start_code(buf+i, buf+buf_size, &state) - buf - 1;
  2704. if((state&0xFFFFFF00) == 0x100){
  2705. if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
  2706. pc->frame_start_found=0;
  2707. pc->state=-1;
  2708. return i-3;
  2709. }
  2710. }
  2711. }
  2712. }
  2713. pc->state= state;
  2714. return END_NOT_FOUND;
  2715. }
  2716. /* handle buffering and image synchronisation */
  2717. static int mpeg_decode_frame(AVCodecContext *avctx,
  2718. void *data, int *data_size,
  2719. uint8_t *buf, int buf_size)
  2720. {
  2721. Mpeg1Context *s = avctx->priv_data;
  2722. const uint8_t *buf_end;
  2723. const uint8_t *buf_ptr;
  2724. uint32_t start_code;
  2725. int ret, input_size;
  2726. AVFrame *picture = data;
  2727. MpegEncContext *s2 = &s->mpeg_enc_ctx;
  2728. dprintf(avctx, "fill_buffer\n");
  2729. if (buf_size == 0) {
  2730. /* special case for last picture */
  2731. if (s2->low_delay==0 && s2->next_picture_ptr) {
  2732. *picture= *(AVFrame*)s2->next_picture_ptr;
  2733. s2->next_picture_ptr= NULL;
  2734. *data_size = sizeof(AVFrame);
  2735. }
  2736. return 0;
  2737. }
  2738. if(s2->flags&CODEC_FLAG_TRUNCATED){
  2739. int next= ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size);
  2740. if( ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 )
  2741. return buf_size;
  2742. }
  2743. buf_ptr = buf;
  2744. buf_end = buf + buf_size;
  2745. #if 0
  2746. if (s->repeat_field % 2 == 1) {
  2747. s->repeat_field++;
  2748. //fprintf(stderr,"\nRepeating last frame: %d -> %d! pict: %d %d", avctx->frame_number-1, avctx->frame_number,
  2749. // s2->picture_number, s->repeat_field);
  2750. if (avctx->flags & CODEC_FLAG_REPEAT_FIELD) {
  2751. *data_size = sizeof(AVPicture);
  2752. goto the_end;
  2753. }
  2754. }
  2755. #endif
  2756. if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == ff_get_fourcc("VCR2"))
  2757. vcr2_init_sequence(avctx);
  2758. s->slice_count= 0;
  2759. for(;;) {
2760. /* find the next start code */
  2761. start_code = -1;
  2762. buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
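/* start_code > 0x1ff means no further start code was found in the buffer: finish the current picture */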
  2763. if (start_code > 0x1ff){
  2764. if(s2->pict_type != B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
  2765. if(avctx->thread_count > 1){
  2766. int i;
  2767. avctx->execute(avctx, slice_decode_thread, (void**)&(s2->thread_context[0]), NULL, s->slice_count);
  2768. for(i=0; i<s->slice_count; i++)
  2769. s2->error_count += s2->thread_context[i]->error_count;
  2770. }
  2771. if (slice_end(avctx, picture)) {
  2772. if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
  2773. *data_size = sizeof(AVPicture);
  2774. }
  2775. }
  2776. return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
  2777. }
  2778. input_size = buf_end - buf_ptr;
  2779. if(avctx->debug & FF_DEBUG_STARTCODE){
  2780. av_log(avctx, AV_LOG_DEBUG, "%3X at %zd left %d\n", start_code, buf_ptr-buf, input_size);
  2781. }
  2782. /* prepare data for next start code */
  2783. switch(start_code) {
  2784. case SEQ_START_CODE:
  2785. mpeg1_decode_sequence(avctx, buf_ptr,
  2786. input_size);
  2787. break;
  2788. case PICTURE_START_CODE:
2789. /* we have a complete image: try to decompress it */
  2790. mpeg1_decode_picture(avctx,
  2791. buf_ptr, input_size);
  2792. break;
  2793. case EXT_START_CODE:
  2794. mpeg_decode_extension(avctx,
  2795. buf_ptr, input_size);
  2796. break;
  2797. case USER_START_CODE:
  2798. mpeg_decode_user_data(avctx,
  2799. buf_ptr, input_size);
  2800. break;
  2801. case GOP_START_CODE:
  2802. s2->first_field=0;
  2803. mpeg_decode_gop(avctx,
  2804. buf_ptr, input_size);
  2805. break;
  2806. default:
  2807. if (start_code >= SLICE_MIN_START_CODE &&
  2808. start_code <= SLICE_MAX_START_CODE) {
  2809. int mb_y= start_code - SLICE_MIN_START_CODE;
  2810. if(s2->last_picture_ptr==NULL){
  2811. /* Skip B-frames if we do not have reference frames. */
  2812. if(s2->pict_type==B_TYPE) break;
2813. /* Skip P-frames if we do not have a reference frame or a valid header. */
  2814. // if(s2->pict_type==P_TYPE && s2->first_field && !s2->first_slice) break;
  2815. }
  2816. /* Skip B-frames if we are in a hurry. */
  2817. if(avctx->hurry_up && s2->pict_type==B_TYPE) break;
  2818. if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==B_TYPE)
  2819. ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=I_TYPE)
  2820. || avctx->skip_frame >= AVDISCARD_ALL)
  2821. break;
  2822. /* Skip everything if we are in a hurry>=5. */
  2823. if(avctx->hurry_up>=5) break;
  2824. if (!s->mpeg_enc_ctx_allocated) break;
  2825. if(s2->codec_id == CODEC_ID_MPEG2VIDEO){
  2826. if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom)
  2827. break;
  2828. }
  2829. if(s2->first_slice){
  2830. s2->first_slice=0;
  2831. if(mpeg_field_start(s2) < 0)
  2832. return -1;
  2833. }
  2834. if(!s2->current_picture_ptr){
  2835. av_log(avctx, AV_LOG_ERROR, "current_picture not initialized\n");
  2836. return -1;
  2837. }
  2838. if(avctx->thread_count > 1){
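/* distribute slices evenly across the threads: open a new thread context once mb_y passes the proportional threshold */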
  2839. int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
  2840. if(threshold <= mb_y){
  2841. MpegEncContext *thread_context= s2->thread_context[s->slice_count];
  2842. thread_context->start_mb_y= mb_y;
  2843. thread_context->end_mb_y = s2->mb_height;
  2844. if(s->slice_count){
  2845. s2->thread_context[s->slice_count-1]->end_mb_y= mb_y;
  2846. ff_update_duplicate_context(thread_context, s2);
  2847. }
  2848. init_get_bits(&thread_context->gb, buf_ptr, input_size*8);
  2849. s->slice_count++;
  2850. }
  2851. buf_ptr += 2; //FIXME add minimum num of bytes per slice
  2852. }else{
  2853. ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size);
  2854. emms_c();
  2855. if(ret < 0){
  2856. if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0)
  2857. ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
  2858. }else{
  2859. ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END);
  2860. }
  2861. }
  2862. }
  2863. break;
  2864. }
  2865. }
  2866. }
  2867. static int mpeg_decode_end(AVCodecContext *avctx)
  2868. {
  2869. Mpeg1Context *s = avctx->priv_data;
  2870. if (s->mpeg_enc_ctx_allocated)
  2871. MPV_common_end(&s->mpeg_enc_ctx);
  2872. return 0;
  2873. }
  2874. AVCodec mpeg1video_decoder = {
  2875. "mpeg1video",
  2876. CODEC_TYPE_VIDEO,
  2877. CODEC_ID_MPEG1VIDEO,
  2878. sizeof(Mpeg1Context),
  2879. mpeg_decode_init,
  2880. NULL,
  2881. mpeg_decode_end,
  2882. mpeg_decode_frame,
  2883. CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
  2884. .flush= ff_mpeg_flush,
  2885. };
  2886. AVCodec mpeg2video_decoder = {
  2887. "mpeg2video",
  2888. CODEC_TYPE_VIDEO,
  2889. CODEC_ID_MPEG2VIDEO,
  2890. sizeof(Mpeg1Context),
  2891. mpeg_decode_init,
  2892. NULL,
  2893. mpeg_decode_end,
  2894. mpeg_decode_frame,
  2895. CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
  2896. .flush= ff_mpeg_flush,
  2897. };
  2898. //legacy decoder
  2899. AVCodec mpegvideo_decoder = {
  2900. "mpegvideo",
  2901. CODEC_TYPE_VIDEO,
  2902. CODEC_ID_MPEG2VIDEO,
  2903. sizeof(Mpeg1Context),
  2904. mpeg_decode_init,
  2905. NULL,
  2906. mpeg_decode_end,
  2907. mpeg_decode_frame,
  2908. CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
  2909. .flush= ff_mpeg_flush,
  2910. };
  2911. #ifdef CONFIG_ENCODERS
  2912. AVCodec mpeg1video_encoder = {
  2913. "mpeg1video",
  2914. CODEC_TYPE_VIDEO,
  2915. CODEC_ID_MPEG1VIDEO,
  2916. sizeof(MpegEncContext),
  2917. encode_init,
  2918. MPV_encode_picture,
  2919. MPV_encode_end,
  2920. .supported_framerates= ff_frame_rate_tab+1,
  2921. .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
  2922. .capabilities= CODEC_CAP_DELAY,
  2923. };
  2924. AVCodec mpeg2video_encoder = {
  2925. "mpeg2video",
  2926. CODEC_TYPE_VIDEO,
  2927. CODEC_ID_MPEG2VIDEO,
  2928. sizeof(MpegEncContext),
  2929. encode_init,
  2930. MPV_encode_picture,
  2931. MPV_encode_end,
  2932. .supported_framerates= ff_frame_rate_tab+1,
  2933. .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, -1},
  2934. .capabilities= CODEC_CAP_DELAY,
  2935. };
  2936. #endif
  2937. #ifdef HAVE_XVMC
  2938. static int mpeg_mc_decode_init(AVCodecContext *avctx){
  2939. Mpeg1Context *s;
  2940. if( avctx->thread_count > 1)
  2941. return -1;
  2942. if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) )
  2943. return -1;
  2944. if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){
  2945. dprintf(avctx, "mpeg12.c: XvMC decoder will work better if SLICE_FLAG_ALLOW_FIELD is set\n");
  2946. }
  2947. mpeg_decode_init(avctx);
  2948. s = avctx->priv_data;
  2949. avctx->pix_fmt = PIX_FMT_XVMC_MPEG2_IDCT;
  2950. avctx->xvmc_acceleration = 2;//2 - the blocks are packed!
  2951. return 0;
  2952. }
  2953. AVCodec mpeg_xvmc_decoder = {
  2954. "mpegvideo_xvmc",
  2955. CODEC_TYPE_VIDEO,
  2956. CODEC_ID_MPEG2VIDEO_XVMC,
  2957. sizeof(Mpeg1Context),
  2958. mpeg_mc_decode_init,
  2959. NULL,
  2960. mpeg_decode_end,
  2961. mpeg_decode_frame,
  2962. CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
  2963. .flush= ff_mpeg_flush,
  2964. };
  2965. #endif
2966. /* this is ugly, I know, but the alternative is to make
2967. hundreds of vars global and prefix them with ff_mpeg1_,
2968. which is far uglier. */
  2969. #include "mdec.c"