/*
 * H261 decoder
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2004 Maarten Daniels
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/**
 * @file h261.c
 * H.261 codec.
 */
#include "common.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h261data.h"

#define H261_MBA_VLC_BITS 9
#define H261_MTYPE_VLC_BITS 6
#define H261_MV_VLC_BITS 7
#define H261_CBP_VLC_BITS 9
#define TCOEFF_VLC_BITS 9

#define MAX_MBA 33

#define IS_FIL(a) ((a)&MB_TYPE_H261_FIL)

/**
 * H261Context
 */
typedef struct H261Context{
    MpegEncContext s;

    int current_mba;
    int mba_diff;
    int mtype;
    int current_mv_x;
    int current_mv_y;
    int gob_number;
    int loop_filter;
    int bits_left; // = 8 - (number of bits of the following frame contained in the last byte of this frame)
    int last_bits; // the bits of the following frame contained in the last byte of this frame
}H261Context;
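
/**
 * Applies the in-loop filter (H.261 FIL option) to the current macroblock:
 * the four 8x8 luma blocks and the two chroma blocks are filtered in place
 * via the DSP h261_loop_filter routine (per the spec a separable low-pass
 * filter with taps 1/4, 1/2, 1/4, left as identity at block edges).
 */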
void ff_h261_loop_filter(H261Context * h){
    MpegEncContext * const s = &h->s;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

    s->dsp.h261_loop_filter(dest_y                   , linesize);
    s->dsp.h261_loop_filter(dest_y                + 8, linesize);
    s->dsp.h261_loop_filter(dest_y + 8 * linesize    , linesize);
    s->dsp.h261_loop_filter(dest_y + 8 * linesize + 8, linesize);
    s->dsp.h261_loop_filter(dest_cb, uvlinesize);
    s->dsp.h261_loop_filter(dest_cr, uvlinesize);
}

static int h261_decode_block(H261Context *h, DCTELEM *block,
                             int n, int coded);
static int h261_decode_mb(H261Context *h,
                          DCTELEM block[6][64]);
void ff_set_qscale(MpegEncContext * s, int qscale);

/***********************************************/
/* decoding */

static VLC h261_mba_vlc;
static VLC h261_mtype_vlc;
static VLC h261_mv_vlc;
static VLC h261_cbp_vlc;

void init_vlc_rl(RLTable *rl);

static void h261_decode_init_vlc(H261Context *h){
    static int done = 0;

    if(!done){
        done = 1;
        init_vlc(&h261_mba_vlc, H261_MBA_VLC_BITS, 34,
                 h261_mba_bits, 1, 1,
                 h261_mba_code, 1, 1);
        init_vlc(&h261_mtype_vlc, H261_MTYPE_VLC_BITS, 10,
                 h261_mtype_bits, 1, 1,
                 h261_mtype_code, 1, 1);
        init_vlc(&h261_mv_vlc, H261_MV_VLC_BITS, 17,
                 &h261_mv_tab[0][1], 2, 1,
                 &h261_mv_tab[0][0], 2, 1);
        init_vlc(&h261_cbp_vlc, H261_CBP_VLC_BITS, 63,
                 &h261_cbp_tab[0][1], 2, 1,
                 &h261_cbp_tab[0][0], 2, 1);
        init_rl(&h261_rl_tcoeff);
        init_vlc_rl(&h261_rl_tcoeff);
    }
}

static int h261_decode_init(AVCodecContext *avctx){
    H261Context *h= avctx->priv_data;
    MpegEncContext * const s = &h->s;

    // set defaults
    MPV_decode_defaults(s);
    s->avctx = avctx;

    s->width  = s->avctx->width;
    s->height = s->avctx->height;
    s->codec_id = s->avctx->codec->id;

    s->out_format = FMT_H261;
    s->low_delay= 1;
    avctx->pix_fmt= PIX_FMT_YUV420P;

    s->codec_id= avctx->codec->id;

    h261_decode_init_vlc(h);

    h->bits_left = 0;
    h->last_bits = 0;

    return 0;
}

/**
 * decodes the group of blocks header or slice header.
 * @return <0 if an error occurred
 */
static int h261_decode_gob_header(H261Context *h){
    unsigned int val;
    MpegEncContext * const s = &h->s;

    /* Check for GOB Start Code */
    val = show_bits(&s->gb, 15);
    if(val)
        return -1;

    /* We have a GBSC */
    skip_bits(&s->gb, 16);

    h->gob_number = get_bits(&s->gb, 4); /* GN */
    s->qscale = get_bits(&s->gb, 5); /* GQUANT */

    /* GEI */
    while (get_bits1(&s->gb) != 0) {
        skip_bits(&s->gb, 8);
    }

    if(s->qscale==0)
        return -1;

    // For the first transmitted macroblock in a GOB, MBA is the absolute address. For
    // subsequent macroblocks, MBA is the difference between the absolute addresses of
    // the macroblock and the last transmitted macroblock.
    h->current_mba = 0;
    h->mba_diff = 0;

    return 0;
}

/**
 * decodes the group of blocks / video packet header.
 * @return <0 if no resync found
 */
static int ff_h261_resync(H261Context *h){
    MpegEncContext * const s = &h->s;
    int left, ret;

    if(show_bits(&s->gb, 15)==0){
        ret= h261_decode_gob_header(h);
        if(ret>=0)
            return 0;
    }
    // OK, it's not where it's supposed to be ...
    s->gb= s->last_resync_gb;
    align_get_bits(&s->gb);
    left= s->gb.size_in_bits - get_bits_count(&s->gb);
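
    // Scan byte-aligned positions for a GOB start code; a complete GOB header
    // needs at least 16 (GBSC) + 4 (GN) + 5 (GQUANT) bits, hence the
    // 15+1+4+5 bound below.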
    for(;left>15+1+4+5; left-=8){
        if(show_bits(&s->gb, 15)==0){
            GetBitContext bak= s->gb;

            ret= h261_decode_gob_header(h);
            if(ret>=0)
                return 0;

            s->gb= bak;
        }
        skip_bits(&s->gb, 8);
    }

    return -1;
}

/**
 * decodes skipped macroblocks
 * @return 0
 */
static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
{
    MpegEncContext * const s = &h->s;
    int i;

    s->mb_intra = 0;

    for(i=mba1; i<mba2; i++){
        int j, xy;

        s->mb_x= ((h->gob_number-1) % 2) * 11 + i % 11;
        s->mb_y= ((h->gob_number-1) / 2) * 3 + i / 11;
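        // A GOB covers 11x3 macroblocks; in CIF the GOBs are laid out in two
        // columns (odd GOB numbers on the left, even on the right), which is
        // what the (gob_number-1) arithmetic above reconstructs.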
        xy = s->mb_x + s->mb_y * s->mb_stride;

        ff_init_block_index(s);
        ff_update_block_index(s);
        s->dsp.clear_blocks(s->block[0]);

        for(j=0;j<6;j++)
            s->block_last_index[j] = -1;

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
        s->mv[0][0][0] = 0;
        s->mv[0][0][1] = 0;
        s->mb_skiped = 1;

        MPV_decode_mb(s, s->block);
    }

    return 0;
}
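
/**
 * Decodes one motion vector component: reads an MVD VLC, maps it through
 * mvmap, applies the sign bit and adds the difference to the predictor v,
 * wrapping the result by +/-32 so it stays inside the legal H.261 range.
 */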
static int decode_mv_component(GetBitContext *gb, int v){
    int mv_diff = get_vlc2(gb, h261_mv_vlc.table, H261_MV_VLC_BITS, 2);
    mv_diff = mvmap[mv_diff];

    if(mv_diff && !get_bits1(gb))
        mv_diff= -mv_diff;

    v += mv_diff;
    if     (v <=-16) v+= 32;
    else if(v >= 16) v-= 32;

    return v;
}

static int h261_decode_mb(H261Context *h,
                          DCTELEM block[6][64])
{
    MpegEncContext * const s = &h->s;
    int i, cbp, xy, old_mtype;

    cbp = 63;

    // Read mba
    do{
        h->mba_diff = get_vlc2(&s->gb, h261_mba_vlc.table, H261_MBA_VLC_BITS, 2)+1;
    }
    while( h->mba_diff == MAX_MBA + 1 ); // stuffing

    if ( h->mba_diff < 0 )
        return -1;

    h->current_mba += h->mba_diff;

    if ( h->current_mba > MAX_MBA )
        return -1;

    s->mb_x= ((h->gob_number-1) % 2) * 11 + ((h->current_mba-1) % 11);
    s->mb_y= ((h->gob_number-1) / 2) * 3 + ((h->current_mba-1) / 11);

    xy = s->mb_x + s->mb_y * s->mb_stride;

    ff_init_block_index(s);
    ff_update_block_index(s);
    s->dsp.clear_blocks(s->block[0]);

    // Read mtype
    old_mtype = h->mtype;
    h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
    h->mtype = h261_mtype_map[h->mtype];

    if (IS_FIL (h->mtype))
        h->loop_filter = 1;

    // Read mquant
    if ( IS_QUANT ( h->mtype ) ){
        ff_set_qscale(s, get_bits(&s->gb, 5));
    }

    s->mb_intra = IS_INTRA4x4(h->mtype);

    // Read mv
    if ( IS_16X16 ( h->mtype ) ){
        // Motion vector data is included for all MC macroblocks. MVD is obtained from the macroblock vector by subtracting the
        // vector of the preceding macroblock. For this calculation the vector of the preceding macroblock is regarded as zero in the
        // following three situations:
        // 1) evaluating MVD for macroblocks 1, 12 and 23;
        // 2) evaluating MVD for macroblocks in which MBA does not represent a difference of 1;
        // 3) MTYPE of the previous macroblock was not MC.
        if ( ( h->current_mba == 1 ) || ( h->current_mba == 12 ) || ( h->current_mba == 23 ) ||
             ( h->mba_diff != 1) || ( !IS_16X16 ( old_mtype ) ))
        {
            h->current_mv_x = 0;
            h->current_mv_y = 0;
        }

        h->current_mv_x= decode_mv_component(&s->gb, h->current_mv_x);
        h->current_mv_y= decode_mv_component(&s->gb, h->current_mv_y);
    }

    // Read cbp
    if ( HAS_CBP( h->mtype ) ){
        cbp = get_vlc2(&s->gb, h261_cbp_vlc.table, H261_CBP_VLC_BITS, 2) + 1;
    }
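
    // cbp now holds one "coded" flag per block, MSB (0x20) for the first luma
    // block and LSB for Cr; the block loop below tests bit 0x20 and doubles
    // cbp after each block. Intra macroblocks keep cbp = 63 (all blocks coded).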

    if(s->mb_intra){
        s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
        goto intra;
    }

    //set motion vectors
    s->mv_dir = MV_DIR_FORWARD;
    s->mv_type = MV_TYPE_16X16;
    s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
    if(IS_16X16 ( h->mtype )){
        s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
        s->mv[0][0][1] = h->current_mv_y * 2;
    }
    else{
        h->current_mv_x = s->mv[0][0][0] = 0;
        h->current_mv_y = s->mv[0][0][1] = 0;
    }

intra:
    /* decode each block */
    if(s->mb_intra || HAS_CBP(h->mtype)){
        for (i = 0; i < 6; i++) {
            if (h261_decode_block(h, block[i], i, cbp&32) < 0){
                return -1;
            }
            cbp+=cbp;
        }
    }

    /* per-MB end of slice check */
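    // (fifteen zero bits can only be the beginning of the next GOB or picture
    //  start code, so seeing them here means this was the last macroblock of
    //  the slice)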
    {
        int v= show_bits(&s->gb, 15);

        if(get_bits_count(&s->gb) + 15 > s->gb.size_in_bits){
            v>>= get_bits_count(&s->gb) + 15 - s->gb.size_in_bits;
        }

        if(v==0){
            return SLICE_END;
        }
    }
    return SLICE_OK;
}

/**
 * decodes a block.
 * @return <0 if an error occurred
 */
static int h261_decode_block(H261Context * h, DCTELEM * block,
                             int n, int coded)
{
    MpegEncContext * const s = &h->s;
    int code, level, i, j, run;
    RLTable *rl = &h261_rl_tcoeff;
    const uint8_t *scan_table;

    // For the variable length encoding there are two code tables, one being used for
    // the first transmitted LEVEL in INTER, INTER+MC and INTER+MC+FIL blocks, the second
    // for all other LEVELs except the first one in INTRA blocks which is fixed length
    // coded with 8 bits.
    // NOTE: the two code tables only differ in one VLC so we handle that manually.
    scan_table = s->intra_scantable.permutated;
    if (s->mb_intra){
        /* DC coef */
        level = get_bits(&s->gb, 8);
        // 0 (00000000b) and -128 (10000000b) are FORBIDDEN
        if((level&0x7F) == 0){
            av_log(s->avctx, AV_LOG_ERROR, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y);
            return -1;
        }
        // The code 1000 0000 is not used, the reconstruction level of 1024 being coded as 1111 1111.
        if (level == 255)
            level = 128;
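        // (the DC coefficient is scaled by 8 at dequantization, so storing 128
        //  here corresponds to the reconstruction level of 1024 mentioned above)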
        block[0] = level;
        i = 1;
    }else if(coded){
        // Run  Level   Code
        // EOB          Not possible for first level when cbp is available (that's why the table is different)
        // 0    1       1s
        // *    *       0*
        int check = show_bits(&s->gb, 2);
        i = 0;
        if ( check & 0x2 ){
            skip_bits(&s->gb, 2);
            block[0] = ( check & 0x1 ) ? -1 : 1;
            i = 1;
        }
    }else{
        i = 0;
    }
    if(!coded){
        s->block_last_index[n] = i - 1;
        return 0;
    }
    for(;;){
        code = get_vlc2(&s->gb, rl->vlc.table, TCOEFF_VLC_BITS, 2);
        if (code < 0){
            av_log(s->avctx, AV_LOG_ERROR, "illegal ac vlc code at %dx%d\n", s->mb_x, s->mb_y);
            return -1;
        }
        if (code == rl->n) {
            /* escape */
            // The remaining combinations of (run, level) are encoded with a 20-bit word
            // consisting of 6 bits escape, 6 bits run and 8 bits level.
            run = get_bits(&s->gb, 6);
            level = (int8_t)get_bits(&s->gb, 8);
        }else if(code == 0){
            break;
        }else{
            run = rl->table_run[code];
            level = rl->table_level[code];
            if (get_bits1(&s->gb))
                level = -level;
        }
        i += run;
        if (i >= 64){
            av_log(s->avctx, AV_LOG_ERROR, "run overflow at %dx%d\n", s->mb_x, s->mb_y);
            return -1;
        }
        j = scan_table[i];
        block[j] = level;
        i++;
    }
    s->block_last_index[n] = i-1;
    return 0;
}

/**
 * decodes the H261 picture header.
 * @return <0 if no startcode found
 */
int h261_decode_picture_header(H261Context *h){
    MpegEncContext * const s = &h->s;
    int format, i;
    static int h261_framecounter = 0;
    uint32_t startcode;

    align_get_bits(&s->gb);
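
    // The picture start code is the 20-bit value 0x00010 and need not be byte
    // aligned, so the bits of this frame that ended up in the previous frame's
    // last byte (last_bits / bits_left) are stitched back onto the front of
    // the start code search below.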
    startcode = (h->last_bits << (12 - (8-h->bits_left))) | get_bits(&s->gb, 20-8 - (8- h->bits_left));

    for(i= s->gb.size_in_bits - get_bits_count(&s->gb); i>24; i-=1){
        startcode = ((startcode << 1) | get_bits(&s->gb, 1)) & 0x000FFFFF;

        if(startcode == 0x10)
            break;
    }

    if (startcode != 0x10){
        av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
        return -1;
    }

    /* temporal reference */
    s->picture_number = get_bits(&s->gb, 5); /* picture timestamp */

    /* PTYPE starts here */
    skip_bits1(&s->gb); /* split screen off */
    skip_bits1(&s->gb); /* camera off */
    skip_bits1(&s->gb); /* freeze picture release off */

    format = get_bits1(&s->gb);

    //only 2 formats possible
    if (format == 0){//QCIF
        s->width = 176;
        s->height = 144;
        s->mb_width = 11;
        s->mb_height = 9;
    }else{//CIF
        s->width = 352;
        s->height = 288;
        s->mb_width = 22;
        s->mb_height = 18;
    }

    s->mb_num = s->mb_width * s->mb_height;

    skip_bits1(&s->gb); /* still image mode off */
    skip_bits1(&s->gb); /* Reserved */

    /* PEI */
    while (get_bits1(&s->gb) != 0){
        skip_bits(&s->gb, 8);
    }

    //h261 has no I-FRAMES, pass the test in MPV_frame_start in mpegvideo.c
    if(h261_framecounter > 1)
        s->pict_type = P_TYPE;
    else
        s->pict_type = I_TYPE;

    h261_framecounter++;

    h->gob_number = 0;
    return 0;
}
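
/**
 * Decodes one group of blocks. An entirely empty GOB is reconstructed as 33
 * skipped macroblocks; otherwise macroblocks are decoded until the
 * end-of-slice condition is reached, with the skipped macroblocks in between
 * reconstructed from the MBA differences.
 * @return 0 on success, -1 on error
 */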
static int h261_decode_gob(H261Context *h){
    MpegEncContext * const s = &h->s;
    int v;

    ff_set_qscale(s, s->qscale);

    /* check for empty gob */
    v= show_bits(&s->gb, 15);
    if(get_bits_count(&s->gb) + 15 > s->gb.size_in_bits){
        v>>= get_bits_count(&s->gb) + 15 - s->gb.size_in_bits;
    }

    if(v==0){
        h261_decode_mb_skipped(h, 0, 33);
        return 0;
    }

    /* decode mb's */
    while(h->current_mba <= MAX_MBA)
    {
        int ret;
        /* DCT & quantize */
        ret= h261_decode_mb(h, s->block);
        if(ret<0){
            const int xy= s->mb_x + s->mb_y*s->mb_stride;
            if(ret==SLICE_END){
                MPV_decode_mb(s, s->block);
                if(h->loop_filter){
                    ff_h261_loop_filter(h);
                }
                h->loop_filter = 0;
                h261_decode_mb_skipped(h, h->current_mba-h->mba_diff, h->current_mba-1);
                h261_decode_mb_skipped(h, h->current_mba, 33);
                return 0;
            }else if(ret==SLICE_NOEND){
                av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy);
                return -1;
            }
            av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy);
            return -1;
        }

        MPV_decode_mb(s, s->block);
        if(h->loop_filter){
            ff_h261_loop_filter(h);
        }
        h->loop_filter = 0;
        h261_decode_mb_skipped(h, h->current_mba-h->mba_diff, h->current_mba-1);
    }

    return -1;
}
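
/**
 * Scans the buffer for the next 20-bit picture start code so the parser can
 * split the byte stream into frames. Because H.261 start codes need not be
 * byte aligned, the leftover bits stored in the H261Context (bits_left /
 * last_bits) are folded into the search state before scanning.
 * @return offset at which the current frame ends, or END_NOT_FOUND
 */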
static int h261_find_frame_end(ParseContext *pc, AVCodecContext* avctx, const uint8_t *buf, int buf_size){
    int vop_found, i, j, bits_left, last_bits;
    uint32_t state;
    H261Context *h = avctx->priv_data;

    if(h){
        bits_left = h->bits_left;
        last_bits = h->last_bits;
    }
    else{
        bits_left = 0;
        last_bits = 0;
    }

    vop_found= pc->frame_start_found;
    state= pc->state;
    if(bits_left!=0 && !vop_found)
        state = state << (8-bits_left) | last_bits;
    i=0;

    if(!vop_found){
        for(i=0; i<buf_size; i++){
            state= (state<<8) | buf[i];
            for(j=0; j<8; j++){
                if(( ( (state<<j) | (buf[i]>>(8-j)) )>>(32-20) == 0x10 )&&(((state >> (17-j)) & 0x4000) == 0x0)){
                    i++;
                    vop_found=1;
                    break;
                }
            }
            if(vop_found)
                break;
        }
    }
    if(vop_found){
        for(; i<buf_size; i++){
            if(avctx->flags & CODEC_FLAG_TRUNCATED)//XXX ffplay workaround, does someone have a better solution?
                state= (state<<8) | buf[i];
            for(j=0; j<8; j++){
                if(( ( (state<<j) | (buf[i]>>(8-j)) )>>(32-20) == 0x10 )&&(((state >> (17-j)) & 0x4000) == 0x0)){
                    pc->frame_start_found=0;
                    pc->state=-1;
                    return i-3;
                }
            }
        }
    }

    pc->frame_start_found= vop_found;
    pc->state= state;
    return END_NOT_FOUND;
}

static int h261_parse(AVCodecParserContext *s,
                      AVCodecContext *avctx,
                      uint8_t **poutbuf, int *poutbuf_size,
                      const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    int next;

    next= h261_find_frame_end(pc,avctx, buf, buf_size);

    if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }

    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}

/**
 * returns the number of bytes consumed for building the current frame
 */
static int get_consumed_bytes(MpegEncContext *s, int buf_size){
    int pos= (get_bits_count(&s->gb)+7)>>3;

    if(s->flags&CODEC_FLAG_TRUNCATED){
        pos -= s->parse_context.last_index;
        if(pos<0) pos=0; // padding is not really read so this might be -1
        return pos;
    }else{
        if(pos==0) pos=1; // avoid infinite loops (I doubt that's needed but ...)
        if(pos+10>buf_size) pos=buf_size; // oops ;)
        return pos;
    }
}

static int h261_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             uint8_t *buf, int buf_size)
{
    H261Context *h= avctx->priv_data;
    MpegEncContext *s = &h->s;
    int ret;
    AVFrame *pict = data;

#ifdef DEBUG
    printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
    printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
#endif
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;

    /* no supplementary picture */
    if (buf_size == 0) {
        return 0;
    }

    if(s->flags&CODEC_FLAG_TRUNCATED){
        int next;

        next= h261_find_frame_end(&s->parse_context,avctx, buf, buf_size);

        if( ff_combine_frame(&s->parse_context, next, &buf, &buf_size) < 0 )
            return buf_size;
    }

retry:

    init_get_bits(&s->gb, buf, buf_size*8);

    if(!s->context_initialized){
        if (MPV_common_init(s) < 0) //we need the idct permutation for reading a custom matrix
            return -1;
    }

    //we need to set current_picture_ptr before reading the header, otherwise we can't store anything in there
    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
        int i= ff_find_unused_picture(s, 0);
        s->current_picture_ptr= &s->picture[i];
    }

    ret = h261_decode_picture_header(h);

    /* skip if the header was damaged */
    if (ret < 0){
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    if (s->width != avctx->width || s->height != avctx->height){
        ParseContext pc= s->parse_context; //FIXME move this demuxing hack to avformat
        s->parse_context.buffer=0;
        MPV_common_end(s);
        s->parse_context= pc;
    }
    if (!s->context_initialized) {
        avctx->width = s->width;
        avctx->height = s->height;

        goto retry;
    }

    // for hurry_up==5
    s->current_picture.pict_type= s->pict_type;
    s->current_picture.key_frame= s->pict_type == I_TYPE;

    /* skip everything if we are in a hurry>=5 */
    if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    ff_er_frame_start(s);

    /* decode each macroblock */
    s->mb_x=0;
    s->mb_y=0;
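
    // CIF carries 12 GOBs (numbered 1..12); QCIF carries 3 GOBs numbered 1, 3
    // and 5, so decoding stops once GOB 5 has been handled.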
    while(h->gob_number < (s->mb_height==18 ? 12 : 5)){
        if(ff_h261_resync(h)<0)
            break;
        h261_decode_gob(h);
    }
    MPV_frame_end(s);

    // h261 doesn't have byte aligned codes
    // store the bits of the next frame that are left in the last byte
    // in the H261Context and remember the number of stored bits
    {
        int bitsleft;
        int current_pos= get_bits_count(&s->gb)>>3;
        bitsleft = (current_pos<<3) - get_bits_count(&s->gb);
        h->bits_left = - bitsleft;
        if(bitsleft > 0)
            h->last_bits= get_bits(&s->gb, 8 - h->bits_left);
        else
            h->last_bits = 0;
    }

    assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
    assert(s->current_picture.pict_type == s->pict_type);

    *pict= *(AVFrame*)&s->current_picture;
    ff_print_debug_info(s, pict);

    /* Return the Picture timestamp as the frame number */
    /* we subtract 1 because it is added in utils.c */
    avctx->frame_number = s->picture_number - 1;

    *data_size = sizeof(AVFrame);

    return get_consumed_bytes(s, buf_size);
}

static int h261_decode_end(AVCodecContext *avctx)
{
    H261Context *h= avctx->priv_data;
    MpegEncContext *s = &h->s;

    MPV_common_end(s);
    return 0;
}

AVCodec h261_decoder = {
    "h261",
    CODEC_TYPE_VIDEO,
    CODEC_ID_H261,
    sizeof(H261Context),
    h261_decode_init,
    NULL,
    h261_decode_end,
    h261_decode_frame,
    CODEC_CAP_TRUNCATED,
};

AVCodecParser h261_parser = {
    { CODEC_ID_H261 },
    sizeof(ParseContext),
    NULL,
    h261_parse,
    ff_parse_close,
};