  1. /*
  2. * H263/MPEG4 backend for ffmpeg encoder and decoder
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * H263+ support.
  5. * Copyright (c) 2001 Juan J. Sierralta P
  6. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * ac prediction encoding, B-frame support, error resilience, optimizations,
  9. * qpel decoding, gmc decoding, interlaced decoding
  10. * by Michael Niedermayer <michaelni@gmx.at>
  11. *
  12. * This file is part of FFmpeg.
  13. *
  14. * FFmpeg is free software; you can redistribute it and/or
  15. * modify it under the terms of the GNU Lesser General Public
  16. * License as published by the Free Software Foundation; either
  17. * version 2.1 of the License, or (at your option) any later version.
  18. *
  19. * FFmpeg is distributed in the hope that it will be useful,
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  22. * Lesser General Public License for more details.
  23. *
  24. * You should have received a copy of the GNU Lesser General Public
  25. * License along with FFmpeg; if not, write to the Free Software
  26. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  27. */
  28. /**
  29. * @file libavcodec/h263.c
  30. * h263/mpeg4 codec.
  31. */
  32. //#define DEBUG
  33. #include <limits.h>
  34. #include "dsputil.h"
  35. #include "avcodec.h"
  36. #include "mpegvideo.h"
  37. #include "h263.h"
  38. #include "h263data.h"
  39. #include "mathops.h"
  40. #include "unary.h"
  41. #include "flv.h"
  42. #include "mpeg4video.h"
  43. //#undef NDEBUG
  44. //#include <assert.h>
45. // The defines below define the number of bits that are read at once for
46. // reading vlc values. Changing these may improve speed and data cache needs;
47. // be aware though that decreasing them may require the number of stages that is
48. // passed to get_vlc* to be increased.
  49. #define MV_VLC_BITS 9
  50. #define H263_MBTYPE_B_VLC_BITS 6
  51. #define CBPC_B_VLC_BITS 3
  52. #if CONFIG_ENCODERS
  53. /**
  54. * Table of number of bits a motion vector component needs.
  55. */
  56. static uint8_t mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
  57. /**
  58. * Minimal fcode that a motion vector component would need.
  59. */
  60. static uint8_t fcode_tab[MAX_MV*2+1];
  61. /**
  62. * Minimal fcode that a motion vector component would need in umv.
  63. * All entries in this table are 1.
  64. */
  65. static uint8_t umv_fcode_tab[MAX_MV*2+1];
  66. //unified encoding tables for run length encoding of coefficients
  67. //unified in the sense that the specification specifies the encoding in several steps.
  68. static uint8_t uni_h263_intra_aic_rl_len [64*64*2*2];
  69. static uint8_t uni_h263_inter_rl_len [64*64*2*2];
  70. //#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128 + (run)*256 + (level))
  71. //#define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run) + (level)*64)
  72. #define UNI_MPEG4_ENC_INDEX(last,run,level) ((last)*128*64 + (run)*128 + (level))
  73. #endif
  74. static uint8_t static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
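/**
 * Returns the H.263 source format code for the given picture size
 * (1 = SQCIF, 2 = QCIF, 3 = CIF, 4 = 4CIF, 5 = 16CIF, 7 = custom/extended PTYPE).
 */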
  75. int h263_get_picture_format(int width, int height)
  76. {
  77. if (width == 128 && height == 96)
  78. return 1;
  79. else if (width == 176 && height == 144)
  80. return 2;
  81. else if (width == 352 && height == 288)
  82. return 3;
  83. else if (width == 704 && height == 576)
  84. return 4;
  85. else if (width == 1408 && height == 1152)
  86. return 5;
  87. else
  88. return 7;
  89. }
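/**
 * Prints picture info (qp, picture type, size and enabled coding options)
 * to the debug log when FF_DEBUG_PICT_INFO is set.
 */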
  90. void ff_h263_show_pict_info(MpegEncContext *s){
  91. if(s->avctx->debug&FF_DEBUG_PICT_INFO){
  92. av_log(s->avctx, AV_LOG_DEBUG, "qp:%d %c size:%d rnd:%d%s%s%s%s%s%s%s%s%s %d/%d\n",
  93. s->qscale, av_get_pict_type_char(s->pict_type),
  94. s->gb.size_in_bits, 1-s->no_rounding,
  95. s->obmc ? " AP" : "",
  96. s->umvplus ? " UMV" : "",
  97. s->h263_long_vectors ? " LONG" : "",
  98. s->h263_plus ? " +" : "",
  99. s->h263_aic ? " AIC" : "",
  100. s->alt_inter_vlc ? " AIV" : "",
  101. s->modified_quant ? " MQ" : "",
  102. s->loop_filter ? " LOOP" : "",
  103. s->h263_slice_structured ? " SS" : "",
  104. s->avctx->time_base.den, s->avctx->time_base.num
  105. );
  106. }
  107. }
  108. #if CONFIG_ENCODERS
  109. /**
110. * Returns the 4-bit value that specifies the given aspect ratio.
  111. * This may be one of the standard aspect ratios or it specifies
  112. * that the aspect will be stored explicitly later.
  113. */
  114. av_const int ff_h263_aspect_to_info(AVRational aspect){
  115. int i;
  116. if(aspect.num==0) aspect= (AVRational){1,1};
  117. for(i=1; i<6; i++){
  118. if(av_cmp_q(ff_h263_pixel_aspect[i], aspect) == 0){
  119. return i;
  120. }
  121. }
  122. return FF_ASPECT_EXTENDED;
  123. }
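/**
 * Writes the H.263 (or H.263+) picture header: PSC, temporal reference,
 * PTYPE and, for H.263+, the PLUSPTYPE fields including an optional
 * custom picture format (CPFMT).
 */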
  124. void h263_encode_picture_header(MpegEncContext * s, int picture_number)
  125. {
  126. int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
  127. int best_clock_code=1;
  128. int best_divisor=60;
  129. int best_error= INT_MAX;
  130. if(s->h263_plus){
  131. for(i=0; i<2; i++){
  132. int div, error;
  133. div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
  134. div= av_clip(div, 1, 127);
  135. error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
  136. if(error < best_error){
  137. best_error= error;
  138. best_divisor= div;
  139. best_clock_code= i;
  140. }
  141. }
  142. }
  143. s->custom_pcf= best_clock_code!=1 || best_divisor!=60;
  144. coded_frame_rate= 1800000;
  145. coded_frame_rate_base= (1000+best_clock_code)*best_divisor;
  146. align_put_bits(&s->pb);
  147. /* Update the pointer to last GOB */
  148. s->ptr_lastgob = put_bits_ptr(&s->pb);
  149. put_bits(&s->pb, 22, 0x20); /* PSC */
  150. temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / //FIXME use timestamp
  151. (coded_frame_rate_base * (int64_t)s->avctx->time_base.den);
  152. put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */
  153. put_bits(&s->pb, 1, 1); /* marker */
  154. put_bits(&s->pb, 1, 0); /* h263 id */
  155. put_bits(&s->pb, 1, 0); /* split screen off */
  156. put_bits(&s->pb, 1, 0); /* camera off */
  157. put_bits(&s->pb, 1, 0); /* freeze picture release off */
  158. format = h263_get_picture_format(s->width, s->height);
  159. if (!s->h263_plus) {
  160. /* H.263v1 */
  161. put_bits(&s->pb, 3, format);
  162. put_bits(&s->pb, 1, (s->pict_type == FF_P_TYPE));
163. /* For now, UMV is disabled on H.263v1, since the restrictions
164. of H.263v1 UMV require checking the predicted MV after
165. calculation of the current MB to see if we're on the limits */
  166. put_bits(&s->pb, 1, 0); /* Unrestricted Motion Vector: off */
  167. put_bits(&s->pb, 1, 0); /* SAC: off */
  168. put_bits(&s->pb, 1, s->obmc); /* Advanced Prediction */
  169. put_bits(&s->pb, 1, 0); /* only I/P frames, no PB frame */
  170. put_bits(&s->pb, 5, s->qscale);
  171. put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */
  172. } else {
  173. int ufep=1;
  174. /* H.263v2 */
  175. /* H.263 Plus PTYPE */
  176. put_bits(&s->pb, 3, 7);
  177. put_bits(&s->pb,3,ufep); /* Update Full Extended PTYPE */
  178. if (format == 7)
  179. put_bits(&s->pb,3,6); /* Custom Source Format */
  180. else
  181. put_bits(&s->pb, 3, format);
  182. put_bits(&s->pb,1, s->custom_pcf);
  183. put_bits(&s->pb,1, s->umvplus); /* Unrestricted Motion Vector */
  184. put_bits(&s->pb,1,0); /* SAC: off */
  185. put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */
  186. put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */
  187. put_bits(&s->pb,1,s->loop_filter); /* Deblocking Filter */
  188. put_bits(&s->pb,1,s->h263_slice_structured); /* Slice Structured */
  189. put_bits(&s->pb,1,0); /* Reference Picture Selection: off */
  190. put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */
  191. put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */
  192. put_bits(&s->pb,1,s->modified_quant); /* Modified Quantization: */
  193. put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
  194. put_bits(&s->pb,3,0); /* Reserved */
  195. put_bits(&s->pb, 3, s->pict_type == FF_P_TYPE);
  196. put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
  197. put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
  198. put_bits(&s->pb,1,s->no_rounding); /* Rounding Type */
  199. put_bits(&s->pb,2,0); /* Reserved */
  200. put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
  201. /* This should be here if PLUSPTYPE */
  202. put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */
  203. if (format == 7) {
  204. /* Custom Picture Format (CPFMT) */
  205. s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);
  206. put_bits(&s->pb,4,s->aspect_ratio_info);
  207. put_bits(&s->pb,9,(s->width >> 2) - 1);
  208. put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
  209. put_bits(&s->pb,9,(s->height >> 2));
  210. if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){
  211. put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
  212. put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
  213. }
  214. }
  215. if(s->custom_pcf){
  216. if(ufep){
  217. put_bits(&s->pb, 1, best_clock_code);
  218. put_bits(&s->pb, 7, best_divisor);
  219. }
  220. put_sbits(&s->pb, 2, temp_ref>>8);
  221. }
  222. /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */
  223. if (s->umvplus)
  224. // put_bits(&s->pb,1,1); /* Limited according tables of Annex D */
  225. //FIXME check actual requested range
  226. put_bits(&s->pb,2,1); /* unlimited */
  227. if(s->h263_slice_structured)
  228. put_bits(&s->pb,2,0); /* no weird submodes */
  229. put_bits(&s->pb, 5, s->qscale);
  230. }
  231. put_bits(&s->pb, 1, 0); /* no PEI */
  232. if(s->h263_slice_structured){
  233. put_bits(&s->pb, 1, 1);
  234. assert(s->mb_x == 0 && s->mb_y == 0);
  235. ff_h263_encode_mba(s);
  236. put_bits(&s->pb, 1, 1);
  237. }
  238. if(s->h263_aic){
  239. s->y_dc_scale_table=
  240. s->c_dc_scale_table= ff_aic_dc_scale_table;
  241. }else{
  242. s->y_dc_scale_table=
  243. s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
  244. }
  245. }
  246. /**
  247. * Encodes a group of blocks header.
  248. */
  249. void h263_encode_gob_header(MpegEncContext * s, int mb_line)
  250. {
  251. put_bits(&s->pb, 17, 1); /* GBSC */
  252. if(s->h263_slice_structured){
  253. put_bits(&s->pb, 1, 1);
  254. ff_h263_encode_mba(s);
  255. if(s->mb_num > 1583)
  256. put_bits(&s->pb, 1, 1);
  257. put_bits(&s->pb, 5, s->qscale); /* GQUANT */
  258. put_bits(&s->pb, 1, 1);
  259. put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */
  260. }else{
  261. int gob_number= mb_line / s->gob_index;
  262. put_bits(&s->pb, 5, gob_number); /* GN */
  263. put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */
  264. put_bits(&s->pb, 5, s->qscale); /* GQUANT */
  265. }
  266. }
  267. /**
268. * modify qscale so that encoding is actually possible in H.263 (limit the difference to -2..2)
  269. */
  270. void ff_clean_h263_qscales(MpegEncContext *s){
  271. int i;
  272. int8_t * const qscale_table= s->current_picture.qscale_table;
  273. ff_init_qscale_tab(s);
  274. for(i=1; i<s->mb_num; i++){
  275. if(qscale_table[ s->mb_index2xy[i] ] - qscale_table[ s->mb_index2xy[i-1] ] >2)
  276. qscale_table[ s->mb_index2xy[i] ]= qscale_table[ s->mb_index2xy[i-1] ]+2;
  277. }
  278. for(i=s->mb_num-2; i>=0; i--){
  279. if(qscale_table[ s->mb_index2xy[i] ] - qscale_table[ s->mb_index2xy[i+1] ] >2)
  280. qscale_table[ s->mb_index2xy[i] ]= qscale_table[ s->mb_index2xy[i+1] ]+2;
  281. }
  282. if(s->codec_id != CODEC_ID_H263P){
  283. for(i=1; i<s->mb_num; i++){
  284. int mb_xy= s->mb_index2xy[i];
  285. if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTER4V)){
  286. s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_INTER;
  287. }
  288. }
  289. }
  290. }
  291. #endif //CONFIG_ENCODERS
  292. #define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
  293. #define tab_bias (tab_size/2)
  294. //used by mpeg4 and rv10 decoder
  295. void ff_mpeg4_init_direct_mv(MpegEncContext *s){
  296. int i;
  297. for(i=0; i<tab_size; i++){
  298. s->direct_scale_mv[0][i] = (i-tab_bias)*s->pb_time/s->pp_time;
  299. s->direct_scale_mv[1][i] = (i-tab_bias)*(s->pb_time-s->pp_time)/s->pp_time;
  300. }
  301. }
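/**
 * Computes the forward and backward motion vectors of one 8x8 block in
 * MPEG-4 direct mode by scaling the co-located P-frame vector with
 * pb_time/pp_time (using the precomputed direct_scale_mv tables when possible).
 */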
  302. static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my, int i){
  303. int xy= s->block_index[i];
  304. uint16_t time_pp= s->pp_time;
  305. uint16_t time_pb= s->pb_time;
  306. int p_mx, p_my;
  307. p_mx= s->next_picture.motion_val[0][xy][0];
  308. if((unsigned)(p_mx + tab_bias) < tab_size){
  309. s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
  310. s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
  311. : s->direct_scale_mv[1][p_mx + tab_bias];
  312. }else{
  313. s->mv[0][i][0] = p_mx*time_pb/time_pp + mx;
  314. s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
  315. : p_mx*(time_pb - time_pp)/time_pp;
  316. }
  317. p_my= s->next_picture.motion_val[0][xy][1];
  318. if((unsigned)(p_my + tab_bias) < tab_size){
  319. s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
  320. s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
  321. : s->direct_scale_mv[1][p_my + tab_bias];
  322. }else{
  323. s->mv[0][i][1] = p_my*time_pb/time_pp + my;
  324. s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
  325. : p_my*(time_pb - time_pp)/time_pp;
  326. }
  327. }
  328. #undef tab_size
  329. #undef tab_bias
  330. /**
  331. *
  332. * @return the mb_type
  333. */
  334. int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
  335. const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
  336. const int colocated_mb_type= s->next_picture.mb_type[mb_index];
  337. uint16_t time_pp;
  338. uint16_t time_pb;
  339. int i;
  340. //FIXME avoid divides
  341. // try special case with shifts for 1 and 3 B-frames?
  342. if(IS_8X8(colocated_mb_type)){
  343. s->mv_type = MV_TYPE_8X8;
  344. for(i=0; i<4; i++){
  345. ff_mpeg4_set_one_direct_mv(s, mx, my, i);
  346. }
  347. return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
  348. } else if(IS_INTERLACED(colocated_mb_type)){
  349. s->mv_type = MV_TYPE_FIELD;
  350. for(i=0; i<2; i++){
  351. int field_select= s->next_picture.ref_index[0][s->block_index[2*i]];
  352. s->field_select[0][i]= field_select;
  353. s->field_select[1][i]= i;
  354. if(s->top_field_first){
  355. time_pp= s->pp_field_time - field_select + i;
  356. time_pb= s->pb_field_time - field_select + i;
  357. }else{
  358. time_pp= s->pp_field_time + field_select - i;
  359. time_pb= s->pb_field_time + field_select - i;
  360. }
  361. s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx;
  362. s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my;
  363. s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0]
  364. : s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp;
  365. s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1]
  366. : s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp;
  367. }
  368. return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
  369. }else{
  370. ff_mpeg4_set_one_direct_mv(s, mx, my, 0);
  371. s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->mv[0][0][0];
  372. s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->mv[0][0][1];
  373. s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = s->mv[1][0][0];
  374. s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = s->mv[1][0][1];
  375. if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample)
  376. s->mv_type= MV_TYPE_16X16;
  377. else
  378. s->mv_type= MV_TYPE_8X8;
  379. return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1; //Note see prev line
  380. }
  381. }
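/**
 * Stores the motion vectors and macroblock type of the current macroblock
 * in current_picture, as needed later by e.g. OBMC, the loop filter and
 * direct-mode prediction.
 */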
  382. void ff_h263_update_motion_val(MpegEncContext * s){
  383. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  384. //FIXME a lot of that is only needed for !low_delay
  385. const int wrap = s->b8_stride;
  386. const int xy = s->block_index[0];
  387. s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
  388. if(s->mv_type != MV_TYPE_8X8){
  389. int motion_x, motion_y;
  390. if (s->mb_intra) {
  391. motion_x = 0;
  392. motion_y = 0;
  393. } else if (s->mv_type == MV_TYPE_16X16) {
  394. motion_x = s->mv[0][0][0];
  395. motion_y = s->mv[0][0][1];
  396. } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
  397. int i;
  398. motion_x = s->mv[0][0][0] + s->mv[0][1][0];
  399. motion_y = s->mv[0][0][1] + s->mv[0][1][1];
  400. motion_x = (motion_x>>1) | (motion_x&1);
  401. for(i=0; i<2; i++){
  402. s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
  403. s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
  404. }
  405. s->current_picture.ref_index[0][xy ]=
  406. s->current_picture.ref_index[0][xy + 1]= s->field_select[0][0];
  407. s->current_picture.ref_index[0][xy + wrap ]=
  408. s->current_picture.ref_index[0][xy + wrap + 1]= s->field_select[0][1];
  409. }
  410. /* no update if 8X8 because it has been done during parsing */
  411. s->current_picture.motion_val[0][xy][0] = motion_x;
  412. s->current_picture.motion_val[0][xy][1] = motion_y;
  413. s->current_picture.motion_val[0][xy + 1][0] = motion_x;
  414. s->current_picture.motion_val[0][xy + 1][1] = motion_y;
  415. s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
  416. s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
  417. s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
  418. s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
  419. }
  420. if(s->encoding){ //FIXME encoding MUST be cleaned up
  421. if (s->mv_type == MV_TYPE_8X8)
  422. s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
  423. else if(s->mb_intra)
  424. s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
  425. else
  426. s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
  427. }
  428. }
  429. #if CONFIG_ENCODERS
  430. static const int dquant_code[5]= {1,0,9,2,3};
  431. /**
432. * Encodes an 8x8 block.
  433. * @param block the 8x8 block
  434. * @param n block index (0-3 are luma, 4-5 are chroma)
  435. */
  436. static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
  437. {
  438. int level, run, last, i, j, last_index, last_non_zero, sign, slevel, code;
  439. RLTable *rl;
  440. rl = &rl_inter;
  441. if (s->mb_intra && !s->h263_aic) {
  442. /* DC coef */
  443. level = block[0];
  444. /* 255 cannot be represented, so we clamp */
  445. if (level > 254) {
  446. level = 254;
  447. block[0] = 254;
  448. }
449. /* 0 cannot be represented either */
  450. else if (level < 1) {
  451. level = 1;
  452. block[0] = 1;
  453. }
  454. if (level == 128) //FIXME check rv10
  455. put_bits(&s->pb, 8, 0xff);
  456. else
  457. put_bits(&s->pb, 8, level);
  458. i = 1;
  459. } else {
  460. i = 0;
  461. if (s->h263_aic && s->mb_intra)
  462. rl = &rl_intra_aic;
  463. if(s->alt_inter_vlc && !s->mb_intra){
  464. int aic_vlc_bits=0;
  465. int inter_vlc_bits=0;
  466. int wrong_pos=-1;
  467. int aic_code;
  468. last_index = s->block_last_index[n];
  469. last_non_zero = i - 1;
  470. for (; i <= last_index; i++) {
  471. j = s->intra_scantable.permutated[i];
  472. level = block[j];
  473. if (level) {
  474. run = i - last_non_zero - 1;
  475. last = (i == last_index);
  476. if(level<0) level= -level;
  477. code = get_rl_index(rl, last, run, level);
  478. aic_code = get_rl_index(&rl_intra_aic, last, run, level);
  479. inter_vlc_bits += rl->table_vlc[code][1]+1;
  480. aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1;
  481. if (code == rl->n) {
  482. inter_vlc_bits += 1+6+8-1;
  483. }
  484. if (aic_code == rl_intra_aic.n) {
  485. aic_vlc_bits += 1+6+8-1;
  486. wrong_pos += run + 1;
  487. }else
  488. wrong_pos += wrong_run[aic_code];
  489. last_non_zero = i;
  490. }
  491. }
  492. i = 0;
  493. if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63)
  494. rl = &rl_intra_aic;
  495. }
  496. }
  497. /* AC coefs */
  498. last_index = s->block_last_index[n];
  499. last_non_zero = i - 1;
  500. for (; i <= last_index; i++) {
  501. j = s->intra_scantable.permutated[i];
  502. level = block[j];
  503. if (level) {
  504. run = i - last_non_zero - 1;
  505. last = (i == last_index);
  506. sign = 0;
  507. slevel = level;
  508. if (level < 0) {
  509. sign = 1;
  510. level = -level;
  511. }
  512. code = get_rl_index(rl, last, run, level);
  513. put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
  514. if (code == rl->n) {
  515. if(!CONFIG_FLV_ENCODER || s->h263_flv <= 1){
  516. put_bits(&s->pb, 1, last);
  517. put_bits(&s->pb, 6, run);
  518. assert(slevel != 0);
  519. if(level < 128)
  520. put_sbits(&s->pb, 8, slevel);
  521. else{
  522. put_bits(&s->pb, 8, 128);
  523. put_sbits(&s->pb, 5, slevel);
  524. put_sbits(&s->pb, 6, slevel>>5);
  525. }
  526. }else{
  527. ff_flv2_encode_ac_esc(&s->pb, slevel, level, run, last);
  528. }
  529. } else {
  530. put_bits(&s->pb, 1, sign);
  531. }
  532. last_non_zero = i;
  533. }
  534. }
  535. }
  536. /* Encode MV differences on H.263+ with Unrestricted MV mode */
  537. static void h263p_encode_umotion(MpegEncContext * s, int val)
  538. {
  539. short sval = 0;
  540. short i = 0;
  541. short n_bits = 0;
  542. short temp_val;
  543. int code = 0;
  544. int tcode;
  545. if ( val == 0)
  546. put_bits(&s->pb, 1, 1);
  547. else if (val == 1)
  548. put_bits(&s->pb, 3, 0);
  549. else if (val == -1)
  550. put_bits(&s->pb, 3, 2);
  551. else {
  552. sval = ((val < 0) ? (short)(-val):(short)val);
  553. temp_val = sval;
  554. while (temp_val != 0) {
  555. temp_val = temp_val >> 1;
  556. n_bits++;
  557. }
  558. i = n_bits - 1;
  559. while (i > 0) {
  560. tcode = (sval & (1 << (i-1))) >> (i-1);
  561. tcode = (tcode << 1) | 1;
  562. code = (code << 2) | tcode;
  563. i--;
  564. }
  565. code = ((code << 1) | (val < 0)) << 1;
  566. put_bits(&s->pb, (2*n_bits)+1, code);
  567. }
  568. }
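/**
 * Predicts the DC value of block n from the left (A) and top (C) neighbours.
 * *dc_val_ptr is set to where the reconstructed DC of the current block
 * has to be stored.
 * @return the predicted DC value
 */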
  569. static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
  570. {
  571. int x, y, wrap, a, c, pred_dc;
  572. int16_t *dc_val;
  573. /* find prediction */
  574. if (n < 4) {
  575. x = 2 * s->mb_x + (n & 1);
  576. y = 2 * s->mb_y + ((n & 2) >> 1);
  577. wrap = s->b8_stride;
  578. dc_val = s->dc_val[0];
  579. } else {
  580. x = s->mb_x;
  581. y = s->mb_y;
  582. wrap = s->mb_stride;
  583. dc_val = s->dc_val[n - 4 + 1];
  584. }
  585. /* B C
  586. * A X
  587. */
  588. a = dc_val[(x - 1) + (y) * wrap];
  589. c = dc_val[(x) + (y - 1) * wrap];
  590. /* No prediction outside GOB boundary */
  591. if(s->first_slice_line && n!=3){
  592. if(n!=2) c= 1024;
  593. if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
  594. }
  595. /* just DC prediction */
  596. if (a != 1024 && c != 1024)
  597. pred_dc = (a + c) >> 1;
  598. else if (a != 1024)
  599. pred_dc = a;
  600. else
  601. pred_dc = c;
  602. /* we assume pred is positive */
  603. *dc_val_ptr = &dc_val[x + y * wrap];
  604. return pred_dc;
  605. }
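/**
 * Encodes one macroblock: COD/MCBPC/CBPY, optional DQUANT, the motion
 * vectors (normal or unrestricted) and the six 8x8 blocks.
 */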
  606. void h263_encode_mb(MpegEncContext * s,
  607. DCTELEM block[6][64],
  608. int motion_x, int motion_y)
  609. {
  610. int cbpc, cbpy, i, cbp, pred_x, pred_y;
  611. int16_t pred_dc;
  612. int16_t rec_intradc[6];
  613. int16_t *dc_ptr[6];
  614. const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1);
  615. if (!s->mb_intra) {
  616. /* compute cbp */
  617. cbp= get_p_cbp(s, block, motion_x, motion_y);
  618. if ((cbp | motion_x | motion_y | s->dquant | (s->mv_type - MV_TYPE_16X16)) == 0) {
  619. /* skip macroblock */
  620. put_bits(&s->pb, 1, 1);
  621. if(interleaved_stats){
  622. s->misc_bits++;
  623. s->last_bits++;
  624. }
  625. s->skip_count++;
  626. return;
  627. }
  628. put_bits(&s->pb, 1, 0); /* mb coded */
  629. cbpc = cbp & 3;
  630. cbpy = cbp >> 2;
  631. if(s->alt_inter_vlc==0 || cbpc!=3)
  632. cbpy ^= 0xF;
  633. if(s->dquant) cbpc+= 8;
  634. if(s->mv_type==MV_TYPE_16X16){
  635. put_bits(&s->pb,
  636. inter_MCBPC_bits[cbpc],
  637. inter_MCBPC_code[cbpc]);
  638. put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]);
  639. if(s->dquant)
  640. put_bits(&s->pb, 2, dquant_code[s->dquant+2]);
  641. if(interleaved_stats){
  642. s->misc_bits+= get_bits_diff(s);
  643. }
  644. /* motion vectors: 16x16 mode */
  645. h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  646. if (!s->umvplus) {
  647. ff_h263_encode_motion_vector(s, motion_x - pred_x,
  648. motion_y - pred_y, 1);
  649. }
  650. else {
  651. h263p_encode_umotion(s, motion_x - pred_x);
  652. h263p_encode_umotion(s, motion_y - pred_y);
  653. if (((motion_x - pred_x) == 1) && ((motion_y - pred_y) == 1))
  654. /* To prevent Start Code emulation */
  655. put_bits(&s->pb,1,1);
  656. }
  657. }else{
  658. put_bits(&s->pb,
  659. inter_MCBPC_bits[cbpc+16],
  660. inter_MCBPC_code[cbpc+16]);
  661. put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]);
  662. if(s->dquant)
  663. put_bits(&s->pb, 2, dquant_code[s->dquant+2]);
  664. if(interleaved_stats){
  665. s->misc_bits+= get_bits_diff(s);
  666. }
  667. for(i=0; i<4; i++){
  668. /* motion vectors: 8x8 mode*/
  669. h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  670. motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
  671. motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
  672. if (!s->umvplus) {
  673. ff_h263_encode_motion_vector(s, motion_x - pred_x,
  674. motion_y - pred_y, 1);
  675. }
  676. else {
  677. h263p_encode_umotion(s, motion_x - pred_x);
  678. h263p_encode_umotion(s, motion_y - pred_y);
  679. if (((motion_x - pred_x) == 1) && ((motion_y - pred_y) == 1))
  680. /* To prevent Start Code emulation */
  681. put_bits(&s->pb,1,1);
  682. }
  683. }
  684. }
  685. if(interleaved_stats){
  686. s->mv_bits+= get_bits_diff(s);
  687. }
  688. } else {
  689. assert(s->mb_intra);
  690. cbp = 0;
  691. if (s->h263_aic) {
  692. /* Predict DC */
  693. for(i=0; i<6; i++) {
  694. int16_t level = block[i][0];
  695. int scale;
  696. if(i<4) scale= s->y_dc_scale;
  697. else scale= s->c_dc_scale;
  698. pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
  699. level -= pred_dc;
  700. /* Quant */
  701. if (level >= 0)
  702. level = (level + (scale>>1))/scale;
  703. else
  704. level = (level - (scale>>1))/scale;
  705. /* AIC can change CBP */
  706. if (level == 0 && s->block_last_index[i] == 0)
  707. s->block_last_index[i] = -1;
  708. if(!s->modified_quant){
  709. if (level < -127)
  710. level = -127;
  711. else if (level > 127)
  712. level = 127;
  713. }
  714. block[i][0] = level;
  715. /* Reconstruction */
  716. rec_intradc[i] = scale*level + pred_dc;
  717. /* Oddify */
  718. rec_intradc[i] |= 1;
  719. //if ((rec_intradc[i] % 2) == 0)
  720. // rec_intradc[i]++;
  721. /* Clipping */
  722. if (rec_intradc[i] < 0)
  723. rec_intradc[i] = 0;
  724. else if (rec_intradc[i] > 2047)
  725. rec_intradc[i] = 2047;
  726. /* Update AC/DC tables */
  727. *dc_ptr[i] = rec_intradc[i];
  728. if (s->block_last_index[i] >= 0)
  729. cbp |= 1 << (5 - i);
  730. }
  731. }else{
  732. for(i=0; i<6; i++) {
  733. /* compute cbp */
  734. if (s->block_last_index[i] >= 1)
  735. cbp |= 1 << (5 - i);
  736. }
  737. }
  738. cbpc = cbp & 3;
  739. if (s->pict_type == FF_I_TYPE) {
  740. if(s->dquant) cbpc+=4;
  741. put_bits(&s->pb,
  742. intra_MCBPC_bits[cbpc],
  743. intra_MCBPC_code[cbpc]);
  744. } else {
  745. if(s->dquant) cbpc+=8;
  746. put_bits(&s->pb, 1, 0); /* mb coded */
  747. put_bits(&s->pb,
  748. inter_MCBPC_bits[cbpc + 4],
  749. inter_MCBPC_code[cbpc + 4]);
  750. }
  751. if (s->h263_aic) {
  752. /* XXX: currently, we do not try to use ac prediction */
  753. put_bits(&s->pb, 1, 0); /* no AC prediction */
  754. }
  755. cbpy = cbp >> 2;
  756. put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]);
  757. if(s->dquant)
  758. put_bits(&s->pb, 2, dquant_code[s->dquant+2]);
  759. if(interleaved_stats){
  760. s->misc_bits+= get_bits_diff(s);
  761. }
  762. }
  763. for(i=0; i<6; i++) {
  764. /* encode each block */
  765. h263_encode_block(s, block[i], i);
  766. /* Update INTRADC for decoding */
  767. if (s->h263_aic && s->mb_intra) {
  768. block[i][0] = rec_intradc[i];
  769. }
  770. }
  771. if(interleaved_stats){
  772. if (!s->mb_intra) {
  773. s->p_tex_bits+= get_bits_diff(s);
  774. s->f_count++;
  775. }else{
  776. s->i_tex_bits+= get_bits_diff(s);
  777. s->i_count++;
  778. }
  779. }
  780. }
  781. #endif
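/**
 * Applies the H.263 Annex J in-loop deblocking filter to the edges of the
 * current macroblock and the relevant edges shared with its top and left
 * neighbours.
 */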
  782. void ff_h263_loop_filter(MpegEncContext * s){
  783. int qp_c;
  784. const int linesize = s->linesize;
  785. const int uvlinesize= s->uvlinesize;
  786. const int xy = s->mb_y * s->mb_stride + s->mb_x;
  787. uint8_t *dest_y = s->dest[0];
  788. uint8_t *dest_cb= s->dest[1];
  789. uint8_t *dest_cr= s->dest[2];
  790. // if(s->pict_type==FF_B_TYPE && !s->readable) return;
  791. /*
  792. Diag Top
  793. Left Center
  794. */
  795. if(!IS_SKIP(s->current_picture.mb_type[xy])){
  796. qp_c= s->qscale;
  797. s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
  798. s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
  799. }else
  800. qp_c= 0;
  801. if(s->mb_y){
  802. int qp_dt, qp_tt, qp_tc;
  803. if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
  804. qp_tt=0;
  805. else
  806. qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
  807. if(qp_c)
  808. qp_tc= qp_c;
  809. else
  810. qp_tc= qp_tt;
  811. if(qp_tc){
  812. const int chroma_qp= s->chroma_qscale_table[qp_tc];
  813. s->dsp.h263_v_loop_filter(dest_y , linesize, qp_tc);
  814. s->dsp.h263_v_loop_filter(dest_y+8, linesize, qp_tc);
  815. s->dsp.h263_v_loop_filter(dest_cb , uvlinesize, chroma_qp);
  816. s->dsp.h263_v_loop_filter(dest_cr , uvlinesize, chroma_qp);
  817. }
  818. if(qp_tt)
  819. s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
  820. if(s->mb_x){
  821. if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
  822. qp_dt= qp_tt;
  823. else
  824. qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
  825. if(qp_dt){
  826. const int chroma_qp= s->chroma_qscale_table[qp_dt];
  827. s->dsp.h263_h_loop_filter(dest_y -8*linesize , linesize, qp_dt);
  828. s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp);
  829. s->dsp.h263_h_loop_filter(dest_cr-8*uvlinesize, uvlinesize, chroma_qp);
  830. }
  831. }
  832. }
  833. if(qp_c){
  834. s->dsp.h263_h_loop_filter(dest_y +8, linesize, qp_c);
  835. if(s->mb_y + 1 == s->mb_height)
  836. s->dsp.h263_h_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
  837. }
  838. if(s->mb_x){
  839. int qp_lc;
  840. if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
  841. qp_lc= qp_c;
  842. else
  843. qp_lc= s->current_picture.qscale_table[xy-1];
  844. if(qp_lc){
  845. s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
  846. if(s->mb_y + 1 == s->mb_height){
  847. const int chroma_qp= s->chroma_qscale_table[qp_lc];
  848. s->dsp.h263_h_loop_filter(dest_y +8* linesize, linesize, qp_lc);
  849. s->dsp.h263_h_loop_filter(dest_cb , uvlinesize, chroma_qp);
  850. s->dsp.h263_h_loop_filter(dest_cr , uvlinesize, chroma_qp);
  851. }
  852. }
  853. }
  854. }
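/**
 * Performs DC and, depending on the prediction direction, AC prediction of
 * the given block from the left or top neighbour, and updates the AC/DC
 * prediction tables.
 */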
  855. static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
  856. {
  857. int x, y, wrap, a, c, pred_dc, scale, i;
  858. int16_t *dc_val, *ac_val, *ac_val1;
  859. /* find prediction */
  860. if (n < 4) {
  861. x = 2 * s->mb_x + (n & 1);
  862. y = 2 * s->mb_y + (n>> 1);
  863. wrap = s->b8_stride;
  864. dc_val = s->dc_val[0];
  865. ac_val = s->ac_val[0][0];
  866. scale = s->y_dc_scale;
  867. } else {
  868. x = s->mb_x;
  869. y = s->mb_y;
  870. wrap = s->mb_stride;
  871. dc_val = s->dc_val[n - 4 + 1];
  872. ac_val = s->ac_val[n - 4 + 1][0];
  873. scale = s->c_dc_scale;
  874. }
  875. ac_val += ((y) * wrap + (x)) * 16;
  876. ac_val1 = ac_val;
  877. /* B C
  878. * A X
  879. */
  880. a = dc_val[(x - 1) + (y) * wrap];
  881. c = dc_val[(x) + (y - 1) * wrap];
  882. /* No prediction outside GOB boundary */
  883. if(s->first_slice_line && n!=3){
  884. if(n!=2) c= 1024;
  885. if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
  886. }
  887. if (s->ac_pred) {
  888. pred_dc = 1024;
  889. if (s->h263_aic_dir) {
  890. /* left prediction */
  891. if (a != 1024) {
  892. ac_val -= 16;
  893. for(i=1;i<8;i++) {
  894. block[s->dsp.idct_permutation[i<<3]] += ac_val[i];
  895. }
  896. pred_dc = a;
  897. }
  898. } else {
  899. /* top prediction */
  900. if (c != 1024) {
  901. ac_val -= 16 * wrap;
  902. for(i=1;i<8;i++) {
  903. block[s->dsp.idct_permutation[i ]] += ac_val[i + 8];
  904. }
  905. pred_dc = c;
  906. }
  907. }
  908. } else {
  909. /* just DC prediction */
  910. if (a != 1024 && c != 1024)
  911. pred_dc = (a + c) >> 1;
  912. else if (a != 1024)
  913. pred_dc = a;
  914. else
  915. pred_dc = c;
  916. }
  917. /* we assume pred is positive */
  918. block[0]=block[0]*scale + pred_dc;
  919. if (block[0] < 0)
  920. block[0] = 0;
  921. else
  922. block[0] |= 1;
  923. /* Update AC/DC tables */
  924. dc_val[(x) + (y) * wrap] = block[0];
  925. /* left copy */
  926. for(i=1;i<8;i++)
  927. ac_val1[i ] = block[s->dsp.idct_permutation[i<<3]];
  928. /* top copy */
  929. for(i=1;i<8;i++)
  930. ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
  931. }
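/**
 * Predicts the motion vector of the given block as the median of its left,
 * top and top-right neighbours (with special cases on the first slice line).
 * @return pointer to the motion vector of the current block;
 *         *px and *py receive the predicted vector
 */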
  932. int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
  933. int *px, int *py)
  934. {
  935. int wrap;
  936. int16_t *A, *B, *C, (*mot_val)[2];
  937. static const int off[4]= {2, 1, 1, -1};
  938. wrap = s->b8_stride;
  939. mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
  940. A = mot_val[ - 1];
  941. /* special case for first (slice) line */
  942. if (s->first_slice_line && block<3) {
  943. // we can't just change some MVs to simulate that as we need them for the B frames (and ME)
944. // and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
  945. if(block==0){ //most common case
  946. if(s->mb_x == s->resync_mb_x){ //rare
  947. *px= *py = 0;
  948. }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
  949. C = mot_val[off[block] - wrap];
  950. if(s->mb_x==0){
  951. *px = C[0];
  952. *py = C[1];
  953. }else{
  954. *px = mid_pred(A[0], 0, C[0]);
  955. *py = mid_pred(A[1], 0, C[1]);
  956. }
  957. }else{
  958. *px = A[0];
  959. *py = A[1];
  960. }
  961. }else if(block==1){
  962. if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
  963. C = mot_val[off[block] - wrap];
  964. *px = mid_pred(A[0], 0, C[0]);
  965. *py = mid_pred(A[1], 0, C[1]);
  966. }else{
  967. *px = A[0];
  968. *py = A[1];
  969. }
  970. }else{ /* block==2*/
  971. B = mot_val[ - wrap];
  972. C = mot_val[off[block] - wrap];
  973. if(s->mb_x == s->resync_mb_x) //rare
  974. A[0]=A[1]=0;
  975. *px = mid_pred(A[0], B[0], C[0]);
  976. *py = mid_pred(A[1], B[1], C[1]);
  977. }
  978. } else {
  979. B = mot_val[ - wrap];
  980. C = mot_val[off[block] - wrap];
  981. *px = mid_pred(A[0], B[0], C[0]);
  982. *py = mid_pred(A[1], B[1], C[1]);
  983. }
  984. return *mot_val;
  985. }
  986. #if CONFIG_ENCODERS
  987. /***************************************************/
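/**
 * Encodes one motion vector difference: the MV VLC code plus f_code-1 extra
 * bits, using modulo (wrap-around) encoding of the value.
 */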
  988. void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
  989. {
  990. int range, l, bit_size, sign, code, bits;
  991. if (val == 0) {
  992. /* zero vector */
  993. code = 0;
  994. put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
  995. } else {
  996. bit_size = f_code - 1;
  997. range = 1 << bit_size;
  998. /* modulo encoding */
  999. l= INT_BIT - 6 - bit_size;
  1000. val = (val<<l)>>l;
  1001. sign = val>>31;
  1002. val= (val^sign)-sign;
  1003. sign&=1;
  1004. val--;
  1005. code = (val >> bit_size) + 1;
  1006. bits = val & (range - 1);
  1007. put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
  1008. if (bit_size > 0) {
  1009. put_bits(&s->pb, bit_size, bits);
  1010. }
  1011. }
  1012. }
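/**
 * Fills the mv_penalty table (bits needed per MV difference and f_code) and
 * the fcode_tab / umv_fcode_tab tables used by motion estimation.
 */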
  1013. static void init_mv_penalty_and_fcode(MpegEncContext *s)
  1014. {
  1015. int f_code;
  1016. int mv;
  1017. for(f_code=1; f_code<=MAX_FCODE; f_code++){
  1018. for(mv=-MAX_MV; mv<=MAX_MV; mv++){
  1019. int len;
  1020. if(mv==0) len= mvtab[0][1];
  1021. else{
  1022. int val, bit_size, code;
  1023. bit_size = f_code - 1;
  1024. val=mv;
  1025. if (val < 0)
  1026. val = -val;
  1027. val--;
  1028. code = (val >> bit_size) + 1;
  1029. if(code<33){
  1030. len= mvtab[code][1] + 1 + bit_size;
  1031. }else{
  1032. len= mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
  1033. }
  1034. }
  1035. mv_penalty[f_code][mv+MAX_MV]= len;
  1036. }
  1037. }
  1038. for(f_code=MAX_FCODE; f_code>0; f_code--){
  1039. for(mv=-(16<<f_code); mv<(16<<f_code); mv++){
  1040. fcode_tab[mv+MAX_MV]= f_code;
  1041. }
  1042. }
  1043. for(mv=0; mv<MAX_MV*2+1; mv++){
  1044. umv_fcode_tab[mv]= 1;
  1045. }
  1046. }
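/**
 * Builds a unified (last, run, level) -> code length table (and optionally a
 * bits table) from the given RLTable, picking the shorter of the regular VLC
 * and the escape code for each entry.
 */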
  1047. static void init_uni_h263_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab){
  1048. int slevel, run, last;
  1049. assert(MAX_LEVEL >= 64);
  1050. assert(MAX_RUN >= 63);
  1051. for(slevel=-64; slevel<64; slevel++){
  1052. if(slevel==0) continue;
  1053. for(run=0; run<64; run++){
  1054. for(last=0; last<=1; last++){
  1055. const int index= UNI_MPEG4_ENC_INDEX(last, run, slevel+64);
  1056. int level= slevel < 0 ? -slevel : slevel;
  1057. int sign= slevel < 0 ? 1 : 0;
  1058. int bits, len, code;
  1059. len_tab[index]= 100;
  1060. /* ESC0 */
  1061. code= get_rl_index(rl, last, run, level);
  1062. bits= rl->table_vlc[code][0];
  1063. len= rl->table_vlc[code][1];
  1064. bits=bits*2+sign; len++;
  1065. if(code!=rl->n && len < len_tab[index]){
  1066. if(bits_tab) bits_tab[index]= bits;
  1067. len_tab [index]= len;
  1068. }
  1069. /* ESC */
  1070. bits= rl->table_vlc[rl->n][0];
  1071. len = rl->table_vlc[rl->n][1];
  1072. bits=bits*2+last; len++;
  1073. bits=bits*64+run; len+=6;
  1074. bits=bits*256+(level&0xff); len+=8;
  1075. if(len < len_tab[index]){
  1076. if(bits_tab) bits_tab[index]= bits;
  1077. len_tab [index]= len;
  1078. }
  1079. }
  1080. }
  1081. }
  1082. }
  1083. void h263_encode_init(MpegEncContext *s)
  1084. {
  1085. static int done = 0;
  1086. if (!done) {
  1087. done = 1;
  1088. init_rl(&rl_inter, static_rl_table_store[0]);
  1089. init_rl(&rl_intra_aic, static_rl_table_store[1]);
  1090. init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
  1091. init_uni_h263_rl_tab(&rl_inter , NULL, uni_h263_inter_rl_len);
  1092. init_mv_penalty_and_fcode(s);
  1093. }
  1094. s->me.mv_penalty= mv_penalty; //FIXME exact table for msmpeg4 & h263p
  1095. s->intra_ac_vlc_length =s->inter_ac_vlc_length = uni_h263_inter_rl_len;
  1096. s->intra_ac_vlc_last_length=s->inter_ac_vlc_last_length= uni_h263_inter_rl_len + 128*64;
  1097. if(s->h263_aic){
  1098. s->intra_ac_vlc_length = uni_h263_intra_aic_rl_len;
  1099. s->intra_ac_vlc_last_length= uni_h263_intra_aic_rl_len + 128*64;
  1100. }
  1101. s->ac_esc_length= 7+1+6+8;
  1102. // use fcodes >1 only for mpeg4 & h263 & h263p FIXME
  1103. switch(s->codec_id){
  1104. case CODEC_ID_MPEG4:
  1105. s->fcode_tab= fcode_tab;
  1106. break;
  1107. case CODEC_ID_H263P:
  1108. if(s->umvplus)
  1109. s->fcode_tab= umv_fcode_tab;
  1110. if(s->modified_quant){
  1111. s->min_qcoeff= -2047;
  1112. s->max_qcoeff= 2047;
  1113. }else{
  1114. s->min_qcoeff= -127;
  1115. s->max_qcoeff= 127;
  1116. }
  1117. break;
  1118. //Note for mpeg4 & h263 the dc-scale table will be set per frame as needed later
  1119. case CODEC_ID_FLV1:
  1120. if (s->h263_flv > 1) {
  1121. s->min_qcoeff= -1023;
  1122. s->max_qcoeff= 1023;
  1123. } else {
  1124. s->min_qcoeff= -127;
  1125. s->max_qcoeff= 127;
  1126. }
  1127. s->y_dc_scale_table=
  1128. s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
  1129. break;
  1130. default: //nothing needed - default table already set in mpegvideo.c
  1131. s->min_qcoeff= -127;
  1132. s->max_qcoeff= 127;
  1133. s->y_dc_scale_table=
  1134. s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
  1135. }
  1136. }
  1137. #endif //CONFIG_ENCODERS
  1138. /***********************************************/
  1139. /* decoding */
  1140. VLC intra_MCBPC_vlc;
  1141. VLC inter_MCBPC_vlc;
  1142. VLC cbpy_vlc;
  1143. static VLC mv_vlc;
  1144. static VLC h263_mbtype_b_vlc;
  1145. static VLC cbpc_b_vlc;
  1146. /* init vlcs */
  1147. /* XXX: find a better solution to handle static init */
  1148. void h263_decode_init_vlc(MpegEncContext *s)
  1149. {
  1150. static int done = 0;
  1151. if (!done) {
  1152. done = 1;
  1153. INIT_VLC_STATIC(&intra_MCBPC_vlc, INTRA_MCBPC_VLC_BITS, 9,
  1154. intra_MCBPC_bits, 1, 1,
  1155. intra_MCBPC_code, 1, 1, 72);
  1156. INIT_VLC_STATIC(&inter_MCBPC_vlc, INTER_MCBPC_VLC_BITS, 28,
  1157. inter_MCBPC_bits, 1, 1,
  1158. inter_MCBPC_code, 1, 1, 198);
  1159. INIT_VLC_STATIC(&cbpy_vlc, CBPY_VLC_BITS, 16,
  1160. &cbpy_tab[0][1], 2, 1,
  1161. &cbpy_tab[0][0], 2, 1, 64);
  1162. INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
  1163. &mvtab[0][1], 2, 1,
  1164. &mvtab[0][0], 2, 1, 538);
  1165. init_rl(&rl_inter, static_rl_table_store[0]);
  1166. init_rl(&rl_intra_aic, static_rl_table_store[1]);
  1167. INIT_VLC_RL(rl_inter, 554);
  1168. INIT_VLC_RL(rl_intra_aic, 554);
  1169. INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
  1170. &h263_mbtype_b_tab[0][1], 2, 1,
  1171. &h263_mbtype_b_tab[0][0], 2, 1, 80);
  1172. INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
  1173. &cbpc_b_tab[0][1], 2, 1,
  1174. &cbpc_b_tab[0][0], 2, 1, 8);
  1175. }
  1176. }
  1177. /**
  1178. * Get the GOB height based on picture height.
  1179. */
  1180. int ff_h263_get_gob_height(MpegEncContext *s){
  1181. if (s->height <= 400)
  1182. return 1;
  1183. else if (s->height <= 800)
  1184. return 2;
  1185. else
  1186. return 4;
  1187. }
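/**
 * Decodes the macroblock address (MBA) field of a slice header and updates
 * s->mb_x / s->mb_y accordingly.
 * @return the decoded macroblock address
 */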
  1188. int ff_h263_decode_mba(MpegEncContext *s)
  1189. {
  1190. int i, mb_pos;
  1191. for(i=0; i<6; i++){
  1192. if(s->mb_num-1 <= ff_mba_max[i]) break;
  1193. }
  1194. mb_pos= get_bits(&s->gb, ff_mba_length[i]);
  1195. s->mb_x= mb_pos % s->mb_width;
  1196. s->mb_y= mb_pos / s->mb_width;
  1197. return mb_pos;
  1198. }
  1199. void ff_h263_encode_mba(MpegEncContext *s)
  1200. {
  1201. int i, mb_pos;
  1202. for(i=0; i<6; i++){
  1203. if(s->mb_num-1 <= ff_mba_max[i]) break;
  1204. }
  1205. mb_pos= s->mb_x + s->mb_width*s->mb_y;
  1206. put_bits(&s->pb, ff_mba_length[i], mb_pos);
  1207. }
  1208. /**
  1209. * decodes the group of blocks header or slice header.
  1210. * @return <0 if an error occurred
  1211. */
  1212. static int h263_decode_gob_header(MpegEncContext *s)
  1213. {
  1214. unsigned int val, gfid, gob_number;
  1215. int left;
  1216. /* Check for GOB Start Code */
  1217. val = show_bits(&s->gb, 16);
  1218. if(val)
  1219. return -1;
  1220. /* We have a GBSC probably with GSTUFF */
  1221. skip_bits(&s->gb, 16); /* Drop the zeros */
  1222. left= get_bits_left(&s->gb);
1223. //MN: we must check the bits left or we might end up in an infinite loop (or segfault)
  1224. for(;left>13; left--){
  1225. if(get_bits1(&s->gb)) break; /* Seek the '1' bit */
  1226. }
  1227. if(left<=13)
  1228. return -1;
  1229. if(s->h263_slice_structured){
  1230. if(get_bits1(&s->gb)==0)
  1231. return -1;
  1232. ff_h263_decode_mba(s);
  1233. if(s->mb_num > 1583)
  1234. if(get_bits1(&s->gb)==0)
  1235. return -1;
  1236. s->qscale = get_bits(&s->gb, 5); /* SQUANT */
  1237. if(get_bits1(&s->gb)==0)
  1238. return -1;
  1239. gfid = get_bits(&s->gb, 2); /* GFID */
  1240. }else{
  1241. gob_number = get_bits(&s->gb, 5); /* GN */
  1242. s->mb_x= 0;
  1243. s->mb_y= s->gob_index* gob_number;
  1244. gfid = get_bits(&s->gb, 2); /* GFID */
  1245. s->qscale = get_bits(&s->gb, 5); /* GQUANT */
  1246. }
  1247. if(s->mb_y >= s->mb_height)
  1248. return -1;
  1249. if(s->qscale==0)
  1250. return -1;
  1251. return 0;
  1252. }
  1253. static inline void memsetw(short *tab, int val, int n)
  1254. {
  1255. int i;
  1256. for(i=0;i<n;i++)
  1257. tab[i] = val;
  1258. }
  1259. /**
  1260. * finds the next resync_marker
  1261. * @param p pointer to buffer to scan
  1262. * @param end pointer to the end of the buffer
  1263. * @return pointer to the next resync_marker, or end if none was found
  1264. */
  1265. const uint8_t *ff_h263_find_resync_marker(const uint8_t *restrict p, const uint8_t * restrict end)
  1266. {
  1267. assert(p < end);
  1268. end-=2;
  1269. p++;
  1270. for(;p<end; p+=2){
  1271. if(!*p){
  1272. if (!p[-1] && p[1]) return p - 1;
  1273. else if(!p[ 1] && p[2]) return p;
  1274. }
  1275. }
  1276. return end+2;
  1277. }
  1278. /**
  1279. * decodes the group of blocks / video packet header.
  1280. * @return bit position of the resync_marker, or <0 if none was found
  1281. */
  1282. int ff_h263_resync(MpegEncContext *s){
  1283. int left, pos, ret;
  1284. if(s->codec_id==CODEC_ID_MPEG4){
  1285. skip_bits1(&s->gb);
  1286. align_get_bits(&s->gb);
  1287. }
  1288. if(show_bits(&s->gb, 16)==0){
  1289. pos= get_bits_count(&s->gb);
  1290. if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4)
  1291. ret= mpeg4_decode_video_packet_header(s);
  1292. else
  1293. ret= h263_decode_gob_header(s);
  1294. if(ret>=0)
  1295. return pos;
  1296. }
  1297. //OK, it's not where it is supposed to be ...
  1298. s->gb= s->last_resync_gb;
  1299. align_get_bits(&s->gb);
  1300. left= get_bits_left(&s->gb);
  1301. for(;left>16+1+5+5; left-=8){
  1302. if(show_bits(&s->gb, 16)==0){
  1303. GetBitContext bak= s->gb;
  1304. pos= get_bits_count(&s->gb);
  1305. if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4)
  1306. ret= mpeg4_decode_video_packet_header(s);
  1307. else
  1308. ret= h263_decode_gob_header(s);
  1309. if(ret>=0)
  1310. return pos;
  1311. s->gb= bak;
  1312. }
  1313. skip_bits(&s->gb, 8);
  1314. }
  1315. return -1;
  1316. }
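/**
 * Decodes one motion vector component and adds it to the prediction.
 * @return the resulting motion vector component, or 0xffff on error
 */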
  1317. int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
  1318. {
  1319. int code, val, sign, shift, l;
  1320. code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
  1321. if (code == 0)
  1322. return pred;
  1323. if (code < 0)
  1324. return 0xffff;
  1325. sign = get_bits1(&s->gb);
  1326. shift = f_code - 1;
  1327. val = code;
  1328. if (shift) {
  1329. val = (val - 1) << shift;
  1330. val |= get_bits(&s->gb, shift);
  1331. val++;
  1332. }
  1333. if (sign)
  1334. val = -val;
  1335. val += pred;
  1336. /* modulo decoding */
  1337. if (!s->h263_long_vectors) {
  1338. l = INT_BIT - 5 - f_code;
  1339. val = (val<<l)>>l;
  1340. } else {
  1341. /* horrible h263 long vector mode */
  1342. if (pred < -31 && val < -63)
  1343. val += 64;
  1344. if (pred > 32 && val > 63)
  1345. val -= 64;
  1346. }
  1347. return val;
  1348. }
  1349. /* Decodes RVLC of H.263+ UMV */
  1350. static int h263p_decode_umotion(MpegEncContext * s, int pred)
  1351. {
  1352. int code = 0, sign;
  1353. if (get_bits1(&s->gb)) /* Motion difference = 0 */
  1354. return pred;
  1355. code = 2 + get_bits1(&s->gb);
  1356. while (get_bits1(&s->gb))
  1357. {
  1358. code <<= 1;
  1359. code += get_bits1(&s->gb);
  1360. }
  1361. sign = code & 1;
  1362. code >>= 1;
  1363. code = (sign) ? (pred - code) : (pred + code);
  1364. dprintf(s->avctx,"H.263+ UMV Motion = %d\n", code);
  1365. return code;
  1366. }
  1367. /**
1368. * Read the next MVs for OBMC. Yes, this is an ugly hack, feel free to send a patch :)
  1369. */
  1370. static void preview_obmc(MpegEncContext *s){
  1371. GetBitContext gb= s->gb;
  1372. int cbpc, i, pred_x, pred_y, mx, my;
  1373. int16_t *mot_val;
  1374. const int xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
  1375. const int stride= s->b8_stride*2;
  1376. for(i=0; i<4; i++)
  1377. s->block_index[i]+= 2;
  1378. for(i=4; i<6; i++)
  1379. s->block_index[i]+= 1;
  1380. s->mb_x++;
  1381. assert(s->pict_type == FF_P_TYPE);
  1382. do{
  1383. if (get_bits1(&s->gb)) {
  1384. /* skip mb */
  1385. mot_val = s->current_picture.motion_val[0][ s->block_index[0] ];
  1386. mot_val[0 ]= mot_val[2 ]=
  1387. mot_val[0+stride]= mot_val[2+stride]= 0;
  1388. mot_val[1 ]= mot_val[3 ]=
  1389. mot_val[1+stride]= mot_val[3+stride]= 0;
  1390. s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
  1391. goto end;
  1392. }
  1393. cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
  1394. }while(cbpc == 20);
  1395. if(cbpc & 4){
  1396. s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
  1397. }else{
  1398. get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
  1399. if (cbpc & 8) {
  1400. if(s->modified_quant){
  1401. if(get_bits1(&s->gb)) skip_bits(&s->gb, 1);
  1402. else skip_bits(&s->gb, 5);
  1403. }else
  1404. skip_bits(&s->gb, 2);
  1405. }
  1406. if ((cbpc & 16) == 0) {
  1407. s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
  1408. /* 16x16 motion prediction */
  1409. mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
  1410. if (s->umvplus)
  1411. mx = h263p_decode_umotion(s, pred_x);
  1412. else
  1413. mx = h263_decode_motion(s, pred_x, 1);
  1414. if (s->umvplus)
  1415. my = h263p_decode_umotion(s, pred_y);
  1416. else
  1417. my = h263_decode_motion(s, pred_y, 1);
  1418. mot_val[0 ]= mot_val[2 ]=
  1419. mot_val[0+stride]= mot_val[2+stride]= mx;
  1420. mot_val[1 ]= mot_val[3 ]=
  1421. mot_val[1+stride]= mot_val[3+stride]= my;
  1422. } else {
  1423. s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
  1424. for(i=0;i<4;i++) {
  1425. mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
  1426. if (s->umvplus)
  1427. mx = h263p_decode_umotion(s, pred_x);
  1428. else
  1429. mx = h263_decode_motion(s, pred_x, 1);
  1430. if (s->umvplus)
  1431. my = h263p_decode_umotion(s, pred_y);
  1432. else
  1433. my = h263_decode_motion(s, pred_y, 1);
  1434. if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
  1435. skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
  1436. mot_val[0] = mx;
  1437. mot_val[1] = my;
  1438. }
  1439. }
  1440. }
  1441. end:
  1442. for(i=0; i<4; i++)
  1443. s->block_index[i]-= 2;
  1444. for(i=4; i<6; i++)
  1445. s->block_index[i]-= 1;
  1446. s->mb_x--;
  1447. s->gb= gb;
  1448. }
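/**
 * Reads the DQUANT field (or the Modified Quantization syntax if enabled)
 * and updates the current quantizer.
 */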
  1449. static void h263_decode_dquant(MpegEncContext *s){
  1450. static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
  1451. if(s->modified_quant){
  1452. if(get_bits1(&s->gb))
  1453. s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
  1454. else
  1455. s->qscale= get_bits(&s->gb, 5);
  1456. }else
  1457. s->qscale += quant_tab[get_bits(&s->gb, 2)];
  1458. ff_set_qscale(s, s->qscale);
  1459. }
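/**
 * Decodes the DC and AC coefficients of one 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 * @param coded whether AC coefficients are present for this block
 * @return 0 on success, -1 on error
 */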
  1460. static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
  1461. int n, int coded)
  1462. {
  1463. int code, level, i, j, last, run;
  1464. RLTable *rl = &rl_inter;
  1465. const uint8_t *scan_table;
  1466. GetBitContext gb= s->gb;
  1467. scan_table = s->intra_scantable.permutated;
  1468. if (s->h263_aic && s->mb_intra) {
  1469. rl = &rl_intra_aic;
  1470. i = 0;
  1471. if (s->ac_pred) {
  1472. if (s->h263_aic_dir)
  1473. scan_table = s->intra_v_scantable.permutated; /* left */
  1474. else
  1475. scan_table = s->intra_h_scantable.permutated; /* top */
  1476. }
  1477. } else if (s->mb_intra) {
  1478. /* DC coef */
  1479. if(s->codec_id == CODEC_ID_RV10){
  1480. #if CONFIG_RV10_DECODER
  1481. if (s->rv10_version == 3 && s->pict_type == FF_I_TYPE) {
  1482. int component, diff;
  1483. component = (n <= 3 ? 0 : n - 4 + 1);
  1484. level = s->last_dc[component];
  1485. if (s->rv10_first_dc_coded[component]) {
  1486. diff = rv_decode_dc(s, n);
  1487. if (diff == 0xffff)
  1488. return -1;
  1489. level += diff;
  1490. level = level & 0xff; /* handle wrap round */
  1491. s->last_dc[component] = level;
  1492. } else {
  1493. s->rv10_first_dc_coded[component] = 1;
  1494. }
  1495. } else {
  1496. level = get_bits(&s->gb, 8);
  1497. if (level == 255)
  1498. level = 128;
  1499. }
  1500. #endif
  1501. }else{
  1502. level = get_bits(&s->gb, 8);
  1503. if((level&0x7F) == 0){
  1504. av_log(s->avctx, AV_LOG_ERROR, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y);
  1505. if(s->error_recognition >= FF_ER_COMPLIANT)
  1506. return -1;
  1507. }
  1508. if (level == 255)
  1509. level = 128;
  1510. }
  1511. block[0] = level;
  1512. i = 1;
  1513. } else {
  1514. i = 0;
  1515. }
  1516. if (!coded) {
  1517. if (s->mb_intra && s->h263_aic)
  1518. goto not_coded;
  1519. s->block_last_index[n] = i - 1;
  1520. return 0;
  1521. }
  1522. retry:
  1523. for(;;) {
  1524. code = get_vlc2(&s->gb, rl->vlc.table, TEX_VLC_BITS, 2);
  1525. if (code < 0){
  1526. av_log(s->avctx, AV_LOG_ERROR, "illegal ac vlc code at %dx%d\n", s->mb_x, s->mb_y);
  1527. return -1;
  1528. }
  1529. if (code == rl->n) {
  1530. /* escape */
  1531. if (CONFIG_FLV_DECODER && s->h263_flv > 1) {
  1532. ff_flv2_decode_ac_esc(&s->gb, &level, &run, &last);
  1533. } else {
  1534. last = get_bits1(&s->gb);
  1535. run = get_bits(&s->gb, 6);
  1536. level = (int8_t)get_bits(&s->gb, 8);
  1537. if(level == -128){
  1538. if (s->codec_id == CODEC_ID_RV10) {
  1539. /* XXX: should patch encoder too */
  1540. level = get_sbits(&s->gb, 12);
  1541. }else{
  1542. level = get_bits(&s->gb, 5);
  1543. level |= get_sbits(&s->gb, 6)<<5;
  1544. }
  1545. }
  1546. }
  1547. } else {
  1548. run = rl->table_run[code];
  1549. level = rl->table_level[code];
  1550. last = code >= rl->last;
  1551. if (get_bits1(&s->gb))
  1552. level = -level;
  1553. }
  1554. i += run;
  1555. if (i >= 64){
  1556. if(s->alt_inter_vlc && rl == &rl_inter && !s->mb_intra){
  1557. //Looks like a hack but no, it's the way it is supposed to work ...
  1558. rl = &rl_intra_aic;
  1559. i = 0;
  1560. s->gb= gb;
  1561. s->dsp.clear_block(block);
  1562. goto retry;
  1563. }
  1564. av_log(s->avctx, AV_LOG_ERROR, "run overflow at %dx%d i:%d\n", s->mb_x, s->mb_y, s->mb_intra);
  1565. return -1;
  1566. }
  1567. j = scan_table[i];
  1568. block[j] = level;
  1569. if (last)
  1570. break;
  1571. i++;
  1572. }
  1573. not_coded:
  1574. if (s->mb_intra && s->h263_aic) {
  1575. h263_pred_acdc(s, block, n);
  1576. i = 63;
  1577. }
  1578. s->block_last_index[n] = i;
  1579. return 0;
  1580. }
  1581. static int h263_skip_b_part(MpegEncContext *s, int cbp)
  1582. {
  1583. DECLARE_ALIGNED(16, DCTELEM, dblock[64]);
  1584. int i, mbi;
1585. /* we have to set s->mb_intra to zero to decode the B-part of a PB-frame correctly,
1586. * but the real value should be restored so it can be used later (in the OBMC condition)
  1587. */
  1588. mbi = s->mb_intra;
  1589. s->mb_intra = 0;
  1590. for (i = 0; i < 6; i++) {
  1591. if (h263_decode_block(s, dblock, i, cbp&32) < 0)
  1592. return -1;
  1593. cbp+=cbp;
  1594. }
  1595. s->mb_intra = mbi;
  1596. return 0;
  1597. }
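/**
 * Parses the MODB field of a (improved) PB-frame macroblock; sets *cbpb if a
 * coded block pattern for the B blocks is present.
 * @return nonzero if motion vector data for the B part follows
 */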
  1598. static int h263_get_modb(GetBitContext *gb, int pb_frame, int *cbpb)
  1599. {
  1600. int c, mv = 1;
  1601. if (pb_frame < 3) { // h.263 Annex G and i263 PB-frame
  1602. c = get_bits1(gb);
  1603. if (pb_frame == 2 && c)
  1604. mv = !get_bits1(gb);
  1605. } else { // h.263 Annex M improved PB-frame
  1606. mv = get_unary(gb, 0, 4) + 1;
  1607. c = mv & 1;
  1608. mv = !!(mv & 2);
  1609. }
  1610. if(c)
  1611. *cbpb = get_bits(gb, 6);
  1612. return mv;
  1613. }
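/**
 * Decodes one macroblock of an H.263 I-, P- or (improved) PB-frame.
 */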
  1614. int ff_h263_decode_mb(MpegEncContext *s,
  1615. DCTELEM block[6][64])
  1616. {
  1617. int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
  1618. int16_t *mot_val;
  1619. const int xy= s->mb_x + s->mb_y * s->mb_stride;
  1620. int cbpb = 0, pb_mv_count = 0;
  1621. assert(!s->h263_pred);
  1622. if (s->pict_type == FF_P_TYPE) {
  1623. do{
  1624. if (get_bits1(&s->gb)) {
  1625. /* skip mb */
  1626. s->mb_intra = 0;
  1627. for(i=0;i<6;i++)
  1628. s->block_last_index[i] = -1;
  1629. s->mv_dir = MV_DIR_FORWARD;
  1630. s->mv_type = MV_TYPE_16X16;
  1631. s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
  1632. s->mv[0][0][0] = 0;
  1633. s->mv[0][0][1] = 0;
                s->mb_skipped = !(s->obmc | s->loop_filter);
                goto end;
            }
            cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
            if (cbpc < 0){
                av_log(s->avctx, AV_LOG_ERROR, "cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
        }while(cbpc == 20);

        s->dsp.clear_blocks(s->block[0]);

        dquant = cbpc & 8;
        s->mb_intra = ((cbpc & 4) != 0);
        if (s->mb_intra) goto intra;

        if(s->pb_frame && get_bits1(&s->gb))
            pb_mv_count = h263_get_modb(&s->gb, s->pb_frame, &cbpb);
        cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);

        if(s->alt_inter_vlc==0 || (cbpc & 3)!=3)
            cbpy ^= 0xF;

        cbp = (cbpc & 3) | (cbpy << 2);
        if (dquant) {
            h263_decode_dquant(s);
        }

        s->mv_dir = MV_DIR_FORWARD;
        if ((cbpc & 16) == 0) {
            s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
            /* 16x16 motion prediction */
            s->mv_type = MV_TYPE_16X16;
            h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
            if (s->umvplus)
                mx = h263p_decode_umotion(s, pred_x);
            else
                mx = h263_decode_motion(s, pred_x, 1);
            if (mx >= 0xffff)
                return -1;

            if (s->umvplus)
                my = h263p_decode_umotion(s, pred_y);
            else
                my = h263_decode_motion(s, pred_y, 1);
            if (my >= 0xffff)
                return -1;

            s->mv[0][0][0] = mx;
            s->mv[0][0][1] = my;

            if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
                skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
        } else {
            s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
            s->mv_type = MV_TYPE_8X8;
            for(i=0;i<4;i++) {
                mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
                if (s->umvplus)
                    mx = h263p_decode_umotion(s, pred_x);
                else
                    mx = h263_decode_motion(s, pred_x, 1);
                if (mx >= 0xffff)
                    return -1;

                if (s->umvplus)
                    my = h263p_decode_umotion(s, pred_y);
                else
                    my = h263_decode_motion(s, pred_y, 1);
                if (my >= 0xffff)
                    return -1;
                s->mv[0][i][0] = mx;
                s->mv[0][i][1] = my;
                if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
                    skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
                mot_val[0] = mx;
                mot_val[1] = my;
            }
        }
    } else if(s->pict_type==FF_B_TYPE) {
        int mb_type;
        const int stride= s->b8_stride;
        int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
        int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ];
//      const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
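
        /* clear the forward and backward MVs of all four 8x8 blocks of this MB
         * in motion_val; the lists that are actually used are refilled below */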
        //FIXME ugly
        mot_val0[0]= mot_val0[2]= mot_val0[0+2*stride]= mot_val0[2+2*stride]=
        mot_val0[1]= mot_val0[3]= mot_val0[1+2*stride]= mot_val0[3+2*stride]=
        mot_val1[0]= mot_val1[2]= mot_val1[0+2*stride]= mot_val1[2+2*stride]=
        mot_val1[1]= mot_val1[3]= mot_val1[1+2*stride]= mot_val1[3+2*stride]= 0;

        do{
            mb_type= get_vlc2(&s->gb, h263_mbtype_b_vlc.table, H263_MBTYPE_B_VLC_BITS, 2);
            if (mb_type < 0){
                av_log(s->avctx, AV_LOG_ERROR, "b mb_type damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
            mb_type= h263_mb_type_b_map[ mb_type ];
        }while(!mb_type);

        s->mb_intra = IS_INTRA(mb_type);
        if(HAS_CBP(mb_type)){
            s->dsp.clear_blocks(s->block[0]);
            cbpc = get_vlc2(&s->gb, cbpc_b_vlc.table, CBPC_B_VLC_BITS, 1);
            if(s->mb_intra){
                dquant = IS_QUANT(mb_type);
                goto intra;
            }

            cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
            if (cbpy < 0){
                av_log(s->avctx, AV_LOG_ERROR, "b cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if(s->alt_inter_vlc==0 || (cbpc & 3)!=3)
                cbpy ^= 0xF;

            cbp = (cbpc & 3) | (cbpy << 2);
        }else
            cbp=0;

        assert(!s->mb_intra);

        if(IS_QUANT(mb_type)){
            h263_decode_dquant(s);
        }
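
        /* direct mode: both reference lists are used and the MVs are derived
         * from the co-located macroblock by ff_mpeg4_set_direct_mv() */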
        if(IS_DIRECT(mb_type)){
            s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
            mb_type |= ff_mpeg4_set_direct_mv(s, 0, 0);
        }else{
            s->mv_dir = 0;
            s->mv_type= MV_TYPE_16X16;
            //FIXME UMV

            if(USES_LIST(mb_type, 0)){
                int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
                s->mv_dir = MV_DIR_FORWARD;

                mx = h263_decode_motion(s, mx, 1);
                my = h263_decode_motion(s, my, 1);

                s->mv[0][0][0] = mx;
                s->mv[0][0][1] = my;
                mot_val[0]= mot_val[2]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx;
                mot_val[1]= mot_val[3]= mot_val[1+2*stride]= mot_val[3+2*stride]= my;
            }

            if(USES_LIST(mb_type, 1)){
                int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
                s->mv_dir |= MV_DIR_BACKWARD;

                mx = h263_decode_motion(s, mx, 1);
                my = h263_decode_motion(s, my, 1);

                s->mv[1][0][0] = mx;
                s->mv[1][0][1] = my;
                mot_val[0]= mot_val[2]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx;
                mot_val[1]= mot_val[3]= mot_val[1+2*stride]= mot_val[3+2*stride]= my;
            }
        }

        s->current_picture.mb_type[xy]= mb_type;
    } else { /* I-Frame */
        do{
            cbpc = get_vlc2(&s->gb, intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
            if (cbpc < 0){
                av_log(s->avctx, AV_LOG_ERROR, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
        }while(cbpc == 8);

        s->dsp.clear_blocks(s->block[0]);

        dquant = cbpc & 4;
        s->mb_intra = 1;
intra:
        s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
        if (s->h263_aic) {
            s->ac_pred = get_bits1(&s->gb);
            if(s->ac_pred){
                s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
                s->h263_aic_dir = get_bits1(&s->gb);
            }
        }else
            s->ac_pred = 0;

        if(s->pb_frame && get_bits1(&s->gb))
            pb_mv_count = h263_get_modb(&s->gb, s->pb_frame, &cbpb);
        cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
        if(cbpy<0){
            av_log(s->avctx, AV_LOG_ERROR, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        cbp = (cbpc & 3) | (cbpy << 2);
        if (dquant) {
            h263_decode_dquant(s);
        }

        pb_mv_count += !!s->pb_frame;
    }
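
    /* read and discard the PB-frame motion vector data (MVDB); the B-part
     * blocks themselves are skipped in h263_skip_b_part() below */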
    while(pb_mv_count--){
        h263_decode_motion(s, 0, 1);
        h263_decode_motion(s, 0, 1);
    }

    /* decode each block */
    for (i = 0; i < 6; i++) {
        if (h263_decode_block(s, block[i], i, cbp&32) < 0)
            return -1;
        cbp+=cbp;
    }

    if(s->pb_frame && h263_skip_b_part(s, cbpb) < 0)
        return -1;
    if(s->obmc && !s->mb_intra){
        if(s->pict_type == FF_P_TYPE && s->mb_x+1<s->mb_width && s->mb_num_left != 1)
            preview_obmc(s);
    }
end:

    /* per-MB end of slice check */
    {
        int v= show_bits(&s->gb, 16);

        if(get_bits_count(&s->gb) + 16 > s->gb.size_in_bits){
            v>>= get_bits_count(&s->gb) + 16 - s->gb.size_in_bits;
        }

        if(v==0)
            return SLICE_END;
    }

    return SLICE_OK;
}

/* Most of this is hardcoded; it should be extended to handle all H.263 streams. */
int h263_decode_picture_header(MpegEncContext *s)
{
    int format, width, height, i;
    uint32_t startcode;

    align_get_bits(&s->gb);

    startcode= get_bits(&s->gb, 22-8);
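
    /* scan for the 22-bit picture start code
     * (0000 0000 0000 0000 1000 00 == 0x20), shifting in one byte at a time */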
    for(i= get_bits_left(&s->gb); i>24; i-=8) {
        startcode = ((startcode << 8) | get_bits(&s->gb, 8)) & 0x003FFFFF;

        if(startcode == 0x20)
            break;
    }

    if (startcode != 0x20) {
        av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
        return -1;
    }

    /* temporal reference */
    i = get_bits(&s->gb, 8); /* picture timestamp */
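    /* TR is only 8 bits; extend it assuming the picture number never
     * decreases (add 256 on wrap-around) */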
    if( (s->picture_number&~0xFF)+i < s->picture_number)
        i+= 256;
    s->current_picture_ptr->pts=
    s->picture_number= (s->picture_number&~0xFF) + i;

    /* PTYPE starts here */
    if (get_bits1(&s->gb) != 1) {
        /* marker */
        av_log(s->avctx, AV_LOG_ERROR, "Bad marker\n");
        return -1;
    }
    if (get_bits1(&s->gb) != 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Bad H263 id\n");
        return -1;      /* h263 id */
    }
    skip_bits1(&s->gb); /* split screen off */
    skip_bits1(&s->gb); /* camera off */
    skip_bits1(&s->gb); /* freeze picture release off */

    format = get_bits(&s->gb, 3);
    /* PTYPE source format:
       0 forbidden, 1 sub-QCIF, 2 QCIF, 3 CIF, 4 4CIF, 5 16CIF,
       6 custom source format (H.263+), 7 extended PTYPE (PLUSPTYPE) */
    if (format != 7 && format != 6) {
        s->h263_plus = 0;
        /* H.263v1 */
        width = h263_format[format][0];
        height = h263_format[format][1];
        if (!width)
            return -1;

        s->pict_type = FF_I_TYPE + get_bits1(&s->gb);

        s->h263_long_vectors = get_bits1(&s->gb);

        if (get_bits1(&s->gb) != 0) {
            av_log(s->avctx, AV_LOG_ERROR, "H263 SAC not supported\n");
            return -1; /* SAC: off */
        }
        s->obmc= get_bits1(&s->gb); /* Advanced prediction mode */
        s->unrestricted_mv = s->h263_long_vectors || s->obmc;

        s->pb_frame = get_bits1(&s->gb);
        s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
        skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */

        s->width = width;
        s->height = height;
        s->avctx->sample_aspect_ratio= (AVRational){12,11};
        s->avctx->time_base= (AVRational){1001, 30000};
    } else {
        int ufep;

        /* H.263v2 */
        s->h263_plus = 1;
        ufep = get_bits(&s->gb, 3); /* Update Full Extended PTYPE */

        /* ufep other than 0 and 1 are reserved */
        if (ufep == 1) {
            /* OPPTYPE */
            format = get_bits(&s->gb, 3);
            dprintf(s->avctx, "ufep=1, format: %d\n", format);
            s->custom_pcf= get_bits1(&s->gb);
            s->umvplus = get_bits1(&s->gb); /* Unrestricted Motion Vector */
            if (get_bits1(&s->gb) != 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Syntax-based Arithmetic Coding (SAC) not supported\n");
            }
            s->obmc= get_bits1(&s->gb); /* Advanced prediction mode */
            s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */
            s->loop_filter= get_bits1(&s->gb);
            s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter;

            s->h263_slice_structured= get_bits1(&s->gb);
            if (get_bits1(&s->gb) != 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Reference Picture Selection not supported\n");
            }
            if (get_bits1(&s->gb) != 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Independent Segment Decoding not supported\n");
            }
            s->alt_inter_vlc= get_bits1(&s->gb);
            s->modified_quant= get_bits1(&s->gb);
            if(s->modified_quant)
                s->chroma_qscale_table= ff_h263_chroma_qscale_table;

            skip_bits(&s->gb, 1); /* Prevent start code emulation */
            skip_bits(&s->gb, 3); /* Reserved */
        } else if (ufep != 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Bad UFEP type (%d)\n", ufep);
            return -1;
        }

        /* MPPTYPE */
        s->pict_type = get_bits(&s->gb, 3);
        switch(s->pict_type){
        case 0: s->pict_type= FF_I_TYPE;break;
        case 1: s->pict_type= FF_P_TYPE;break;
        case 2: s->pict_type= FF_P_TYPE;s->pb_frame = 3;break;
        case 3: s->pict_type= FF_B_TYPE;break;
        case 7: s->pict_type= FF_I_TYPE;break; //ZYGO
        default:
            return -1;
        }
        skip_bits(&s->gb, 2);
        s->no_rounding = get_bits1(&s->gb);
        skip_bits(&s->gb, 4);

        /* Get the picture dimensions */
        if (ufep) {
            if (format == 6) {
                /* Custom Picture Format (CPFMT) */
                s->aspect_ratio_info = get_bits(&s->gb, 4);
                dprintf(s->avctx, "aspect: %d\n", s->aspect_ratio_info);
                /* aspect ratios:
                   0 - forbidden
                   1 - 1:1
                   2 - 12:11 (CIF 4:3)
                   3 - 10:11 (525-type 4:3)
                   4 - 16:11 (CIF 16:9)
                   5 - 40:33 (525-type 16:9)
                   6-14 - reserved
                */
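                /* CPFMT: width = (9-bit value + 1) * 4, height = 9-bit value * 4;
                 * e.g. coded values 175 and 144 give 704x576 */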
                width = (get_bits(&s->gb, 9) + 1) * 4;
                skip_bits1(&s->gb);
                height = get_bits(&s->gb, 9) * 4;
                dprintf(s->avctx, "\nH.263+ Custom picture: %dx%d\n",width,height);
                if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
                    /* aspected dimensions */
                    s->avctx->sample_aspect_ratio.num= get_bits(&s->gb, 8);
                    s->avctx->sample_aspect_ratio.den= get_bits(&s->gb, 8);
                }else{
                    s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
                }
            } else {
                width = h263_format[format][0];
                height = h263_format[format][1];
                s->avctx->sample_aspect_ratio= (AVRational){12,11};
            }
            if ((width == 0) || (height == 0))
                return -1;
            s->width = width;
            s->height = height;
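
            /* custom picture clock frequency: one tick lasts
             * (1000 or 1001) * divisor / 1800000 seconds before reduction,
             * e.g. divisor 60 with the 1000 base gives 60000/1800000 = 1/30 s */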
            if(s->custom_pcf){
                int gcd;
                s->avctx->time_base.den= 1800000;
                s->avctx->time_base.num= 1000 + get_bits1(&s->gb);
                s->avctx->time_base.num*= get_bits(&s->gb, 7);
                if(s->avctx->time_base.num == 0){
                    av_log(s->avctx, AV_LOG_ERROR, "zero framerate\n");
                    return -1;
                }
                gcd= av_gcd(s->avctx->time_base.den, s->avctx->time_base.num);
                s->avctx->time_base.den /= gcd;
                s->avctx->time_base.num /= gcd;
            }else{
                s->avctx->time_base= (AVRational){1001, 30000};
            }
        }

        if(s->custom_pcf){
            skip_bits(&s->gb, 2); //extended Temporal reference
        }

        if (ufep) {
            if (s->umvplus) {
                if(get_bits1(&s->gb)==0) /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */
                    skip_bits1(&s->gb);
            }
            if(s->h263_slice_structured){
                if (get_bits1(&s->gb) != 0) {
                    av_log(s->avctx, AV_LOG_ERROR, "rectangular slices not supported\n");
                }
                if (get_bits1(&s->gb) != 0) {
                    av_log(s->avctx, AV_LOG_ERROR, "unordered slices not supported\n");
                }
            }
        }

        s->qscale = get_bits(&s->gb, 5);
    }

    s->mb_width = (s->width + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    s->mb_num = s->mb_width * s->mb_height;

    if (s->pb_frame) {
        skip_bits(&s->gb, 3); /* Temporal reference for B-pictures */
        if (s->custom_pcf)
            skip_bits(&s->gb, 2); //extended Temporal reference
        skip_bits(&s->gb, 2); /* Quantization information for B-pictures */
    }
    /* PEI: each PEI bit set to 1 is followed by 8 bits of extra information (PSUPP) that we skip */
    while (get_bits1(&s->gb) != 0) {
        skip_bits(&s->gb, 8);
    }

    if(s->h263_slice_structured){
        if (get_bits1(&s->gb) != 1) {
            av_log(s->avctx, AV_LOG_ERROR, "SEPB1 marker missing\n");
            return -1;
        }

        ff_h263_decode_mba(s);

        if (get_bits1(&s->gb) != 1) {
            av_log(s->avctx, AV_LOG_ERROR, "SEPB2 marker missing\n");
            return -1;
        }
    }
    s->f_code = 1;

    if(s->h263_aic){
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_aic_dc_scale_table;
    }else{
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
    }

    ff_h263_show_pict_info(s);

    if (s->pict_type == FF_I_TYPE && s->codec_tag == AV_RL32("ZYGO")){
        int i,j;
        for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
        av_log(s->avctx, AV_LOG_DEBUG, "\n");
        for(i=0; i<13; i++){
            for(j=0; j<3; j++){
                int v= get_bits(&s->gb, 8);
                v |= get_sbits(&s->gb, 8)<<8;
                av_log(s->avctx, AV_LOG_DEBUG, " %5d", v);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
        for(i=0; i<50; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
    }

    return 0;
}