/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder
 * @author Stefan Gehrer <stefan.gehrer@gmx.de>
 */

#include "libavutil/avassert.h"
#include "avcodec.h"
#include "get_bits.h"
#include "golomb.h"
#include "cavs.h"

static const uint8_t mv_scan[4] = {
    MV_FWD_X0,MV_FWD_X1,
    MV_FWD_X2,MV_FWD_X3
};

static const uint8_t cbp_tab[64][2] = {
    {63, 0},{15,15},{31,63},{47,31},{ 0,16},{14,32},{13,47},{11,13},
    { 7,14},{ 5,11},{10,12},{ 8, 5},{12,10},{61, 7},{ 4,48},{55, 3},
    { 1, 2},{ 2, 8},{59, 4},{ 3, 1},{62,61},{ 9,55},{ 6,59},{29,62},
    {45,29},{51,27},{23,23},{39,19},{27,30},{46,28},{53, 9},{30, 6},
    {43,60},{37,21},{60,44},{16,26},{21,51},{28,35},{19,18},{35,20},
    {42,24},{26,53},{44,17},{32,37},{58,39},{24,45},{20,58},{17,43},
    {18,42},{48,46},{22,36},{33,33},{25,34},{49,40},{40,52},{36,49},
    {34,50},{50,56},{52,25},{54,22},{41,54},{56,57},{38,41},{57,38}
};

/*****************************************************************************
 *
 * motion vector prediction
 *
 ****************************************************************************/

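/* Save the forward MVs of the current macroblock; they are re-read as
 * co-located MVs by the B-frame direct/skip modes via mv_pred_direct(). */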
static inline void store_mvs(AVSContext *h) {
    h->col_mv[h->mbidx*4 + 0] = h->mv[MV_FWD_X0];
    h->col_mv[h->mbidx*4 + 1] = h->mv[MV_FWD_X1];
    h->col_mv[h->mbidx*4 + 2] = h->mv[MV_FWD_X2];
    h->col_mv[h->mbidx*4 + 3] = h->mv[MV_FWD_X3];
}

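/* Derive the forward and backward MVs of a direct/skip block from the
 * co-located MV saved by store_mvs().  h->direct_den[] holds 16384 divided by
 * the temporal distance of the co-located MV (precomputed in decode_pic()),
 * so the temporal scaling becomes a multiply followed by a >>14; the sign
 * mask m applies the rounding to the magnitude, keeping it symmetric for
 * negative components. */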
static inline void mv_pred_direct(AVSContext *h, cavs_vector *pmv_fw,
                                  cavs_vector *col_mv) {
    cavs_vector *pmv_bw = pmv_fw + MV_BWD_OFFS;
    int den = h->direct_den[col_mv->ref];
    int m = col_mv->x >> 31;

    pmv_fw->dist = h->dist[1];
    pmv_bw->dist = h->dist[0];
    pmv_fw->ref = 1;
    pmv_bw->ref = 0;
    /* scale the co-located motion vector according to its temporal span */
    pmv_fw->x = (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m;
    pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m);
    m = col_mv->y >> 31;
    pmv_fw->y = (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m;
    pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m);
}

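/* Symmetric mode: only the forward MV is coded.  sym_factor is
 * dist[0] * (512/dist[1]) (see decode_pic()), so the backward MV is
 * approximately -forward * dist[0]/dist[1], computed with a rounded >>9. */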
static inline void mv_pred_sym(AVSContext *h, cavs_vector *src, enum cavs_block size) {
    cavs_vector *dst = src + MV_BWD_OFFS;

    /* backward mv is the scaled and negated forward mv */
    dst->x = -((src->x * h->sym_factor + 256) >> 9);
    dst->y = -((src->y * h->sym_factor + 256) >> 9);
    dst->ref = 0;
    dst->dist = h->dist[0];
    set_mvs(dst, size);
}

/*****************************************************************************
 *
 * residual data decoding
 *
 ****************************************************************************/

/** kth-order exponential golomb code */
static inline int get_ue_code(GetBitContext *gb, int order) {
    if(order) {
        int ret = get_ue_golomb(gb) << order;
        return ret + get_bits(gb,order);
    }
    return get_ue_golomb(gb);
}

/**
 * decode coefficients from one 8x8 block, dequantize, inverse transform
 * and add them to sample block
 * @param r pointer to 2D VLC table
 * @param esc_golomb_order escape codes are k-golomb with this order k
 * @param qp quantizer
 * @param dst location of sample block
 * @param stride line stride in frame buffer
 */
static int decode_residual_block(AVSContext *h, GetBitContext *gb,
                                 const struct dec_2dvlc *r, int esc_golomb_order,
                                 int qp, uint8_t *dst, int stride) {
    int i, esc_code, level, mask;
    unsigned int level_code, run;
    DCTELEM level_buf[65];
    uint8_t run_buf[65];
    DCTELEM *block = h->block;

    for(i=0;i<65;i++) {
        level_code = get_ue_code(gb,r->golomb_order);
        if(level_code >= ESCAPE_CODE) {
            run = ((level_code - ESCAPE_CODE) >> 1) + 1;
            if(run > 64)
                return -1;
            esc_code = get_ue_code(gb,esc_golomb_order);
            level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
            while(level > r->inc_limit)
                r++;
            mask = -(level_code & 1);
            level = (level^mask) - mask;
        } else {
            level = r->rltab[level_code][0];
            if(!level) //end of block signal
                break;
            run = r->rltab[level_code][1];
            r += r->rltab[level_code][2];
        }
        level_buf[i] = level;
        run_buf[i] = run;
    }
    if(dequant(h,level_buf, run_buf, block, ff_cavs_dequant_mul[qp],
               ff_cavs_dequant_shift[qp], i))
        return -1;
    h->cdsp.cavs_idct8_add(dst,block,stride);
    h->s.dsp.clear_block(block);
    return 0;
}

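/* In the coded block pattern, bits 0-3 select the four luma 8x8 blocks and
 * bits 4 and 5 the Cb and Cr blocks; the chroma blocks always use golomb
 * order 0 and a quantizer remapped through ff_cavs_chroma_qp[]. */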
static inline void decode_residual_chroma(AVSContext *h) {
    if(h->cbp & (1<<4))
        decode_residual_block(h,&h->s.gb,ff_cavs_chroma_dec,0,
                              ff_cavs_chroma_qp[h->qp],h->cu,h->c_stride);
    if(h->cbp & (1<<5))
        decode_residual_block(h,&h->s.gb,ff_cavs_chroma_dec,0,
                              ff_cavs_chroma_qp[h->qp],h->cv,h->c_stride);
}

static inline int decode_residual_inter(AVSContext *h) {
    int block;

    /* get coded block pattern */
    int cbp= get_ue_golomb(&h->s.gb);
    if(cbp > 63U){
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal inter cbp\n");
        return -1;
    }
    h->cbp = cbp_tab[cbp][1];

    /* get quantizer */
    if(h->cbp && !h->qp_fixed)
        h->qp = (h->qp + get_se_golomb(&h->s.gb)) & 63;
    for(block=0;block<4;block++)
        if(h->cbp & (1<<block))
            decode_residual_block(h,&h->s.gb,ff_cavs_inter_dec,0,h->qp,
                                  h->cy + h->luma_scan[block], h->l_stride);
    decode_residual_chroma(h);

    return 0;
}

/*****************************************************************************
 *
 * macroblock level
 *
 ****************************************************************************/

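/* Intra macroblock: each of the four luma 8x8 blocks codes its prediction
 * mode relative to a predictor, the minimum of the modes of the left and top
 * neighbouring blocks (INTRA_L_LP if either is unavailable).  A one-bit flag
 * selects the predictor itself; otherwise two bits give the remaining mode. */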
static int decode_mb_i(AVSContext *h, int cbp_code) {
    GetBitContext *gb = &h->s.gb;
    unsigned pred_mode_uv;
    int block;
    uint8_t top[18];
    uint8_t *left = NULL;
    uint8_t *d;

    ff_cavs_init_mb(h);

    /* get intra prediction modes from stream */
    for(block=0;block<4;block++) {
        int nA,nB,predpred;
        int pos = ff_cavs_scan3x3[block];

        nA = h->pred_mode_Y[pos-1];
        nB = h->pred_mode_Y[pos-3];
        predpred = FFMIN(nA,nB);
        if(predpred == NOT_AVAIL) // if either is not available
            predpred = INTRA_L_LP;
        if(!get_bits1(gb)){
            int rem_mode= get_bits(gb, 2);
            predpred = rem_mode + (rem_mode >= predpred);
        }
        h->pred_mode_Y[pos] = predpred;
    }
    pred_mode_uv = get_ue_golomb(gb);
    if(pred_mode_uv > 6) {
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n");
        return -1;
    }
    ff_cavs_modify_mb_i(h, &pred_mode_uv);

    /* get coded block pattern */
    if(h->pic_type == AV_PICTURE_TYPE_I)
        cbp_code = get_ue_golomb(gb);
    if(cbp_code > 63U){
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n");
        return -1;
    }
    h->cbp = cbp_tab[cbp_code][0];
    if(h->cbp && !h->qp_fixed)
        h->qp = (h->qp + get_se_golomb(gb)) & 63; //qp_delta

    /* luma intra prediction interleaved with residual decode/transform/add */
    for(block=0;block<4;block++) {
        d = h->cy + h->luma_scan[block];
        ff_cavs_load_intra_pred_luma(h, top, &left, block);
        h->intra_pred_l[h->pred_mode_Y[ff_cavs_scan3x3[block]]]
            (d, top, left, h->l_stride);
        if(h->cbp & (1<<block))
            decode_residual_block(h,gb,ff_cavs_intra_dec,1,h->qp,d,h->l_stride);
    }

    /* chroma intra prediction */
    ff_cavs_load_intra_pred_chroma(h);
    h->intra_pred_c[pred_mode_uv](h->cu, &h->top_border_u[h->mbx*10],
                                  h->left_border_u, h->c_stride);
    h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx*10],
                                  h->left_border_v, h->c_stride);

    decode_residual_chroma(h);
    ff_cavs_filter(h,I_8X8);
    set_mv_intra(h);
    return 0;
}

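/* P macroblock: one forward MV per partition.  If ref_flag is set in the
 * picture header there is only one reference picture; otherwise one bit per
 * partition selects the reference index. */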
static void decode_mb_p(AVSContext *h, enum cavs_mb mb_type) {
    GetBitContext *gb = &h->s.gb;
    int ref[4];

    ff_cavs_init_mb(h);

    switch(mb_type) {
    case P_SKIP:
        ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0);
        break;
    case P_16X16:
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16,ref[0]);
        break;
    case P_16X8:
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[2] = h->ref_flag ? 0 : get_bits1(gb);
        ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, ref[0]);
        ff_cavs_mv(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]);
        break;
    case P_8X16:
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[1] = h->ref_flag ? 0 : get_bits1(gb);
        ff_cavs_mv(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, ref[0]);
        ff_cavs_mv(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, ref[1]);
        break;
    case P_8X8:
        ref[0] = h->ref_flag ? 0 : get_bits1(gb);
        ref[1] = h->ref_flag ? 0 : get_bits1(gb);
        ref[2] = h->ref_flag ? 0 : get_bits1(gb);
        ref[3] = h->ref_flag ? 0 : get_bits1(gb);
        ff_cavs_mv(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]);
        ff_cavs_mv(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]);
        ff_cavs_mv(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]);
        ff_cavs_mv(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]);
    }
    ff_cavs_inter(h, mb_type);
    set_intra_mode_default(h);
    store_mvs(h);
    if(mb_type != P_SKIP)
        decode_residual_inter(h);
    ff_cavs_filter(h,mb_type);
    h->col_type_base[h->mbidx] = mb_type;
}

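/* B macroblock: skip/direct partitions take both MVs from the co-located
 * block of the previously decoded reference picture (falling back to spatial
 * prediction if that macroblock was intra coded); symmetric partitions code
 * only the forward MV and derive the backward one in mv_pred_sym(). */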
static void decode_mb_b(AVSContext *h, enum cavs_mb mb_type) {
    int block;
    enum cavs_sub_mb sub_type[4];
    int flags;

    ff_cavs_init_mb(h);

    /* reset all MVs */
    h->mv[MV_FWD_X0] = ff_cavs_dir_mv;
    set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
    h->mv[MV_BWD_X0] = ff_cavs_dir_mv;
    set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
    switch(mb_type) {
    case B_SKIP:
    case B_DIRECT:
        if(!h->col_type_base[h->mbidx]) {
            /* intra MB at co-location, do in-plane prediction */
            ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1);
            ff_cavs_mv(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0);
        } else
            /* direct prediction from co-located P MB, block-wise */
            for(block=0;block<4;block++)
                mv_pred_direct(h,&h->mv[mv_scan[block]],
                               &h->col_mv[h->mbidx*4 + block]);
        break;
    case B_FWD_16X16:
        ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
        break;
    case B_SYM_16X16:
        ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
        mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16);
        break;
    case B_BWD_16X16:
        ff_cavs_mv(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0);
        break;
    case B_8X8:
        for(block=0;block<4;block++)
            sub_type[block] = get_bits(&h->s.gb,2);
        for(block=0;block<4;block++) {
            switch(sub_type[block]) {
            case B_SUB_DIRECT:
                if(!h->col_type_base[h->mbidx]) {
                    /* intra MB at co-location, do in-plane prediction */
                    ff_cavs_mv(h, mv_scan[block], mv_scan[block]-3,
                               MV_PRED_BSKIP, BLK_8X8, 1);
                    ff_cavs_mv(h, mv_scan[block]+MV_BWD_OFFS,
                               mv_scan[block]-3+MV_BWD_OFFS,
                               MV_PRED_BSKIP, BLK_8X8, 0);
                } else
                    mv_pred_direct(h,&h->mv[mv_scan[block]],
                                   &h->col_mv[h->mbidx*4 + block]);
                break;
            case B_SUB_FWD:
                ff_cavs_mv(h, mv_scan[block], mv_scan[block]-3,
                           MV_PRED_MEDIAN, BLK_8X8, 1);
                break;
            case B_SUB_SYM:
                ff_cavs_mv(h, mv_scan[block], mv_scan[block]-3,
                           MV_PRED_MEDIAN, BLK_8X8, 1);
                mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8);
                break;
            }
        }
        for(block=0;block<4;block++) {
            if(sub_type[block] == B_SUB_BWD)
                ff_cavs_mv(h, mv_scan[block]+MV_BWD_OFFS,
                           mv_scan[block]+MV_BWD_OFFS-3,
                           MV_PRED_MEDIAN, BLK_8X8, 0);
        }
        break;
    default:
        av_assert2((mb_type > B_SYM_16X16) && (mb_type < B_8X8));
        flags = ff_cavs_partition_flags[mb_type];
        if(mb_type & 1) { /* 16x8 macroblock types */
            if(flags & FWD0)
                ff_cavs_mv(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
            if(flags & SYM0)
                mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8);
            if(flags & FWD1)
                ff_cavs_mv(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
            if(flags & SYM1)
                mv_pred_sym(h, &h->mv[MV_FWD_X2], BLK_16X8);
            if(flags & BWD0)
                ff_cavs_mv(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP, BLK_16X8, 0);
            if(flags & BWD1)
                ff_cavs_mv(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0);
        } else { /* 8x16 macroblock types */
            if(flags & FWD0)
                ff_cavs_mv(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
            if(flags & SYM0)
                mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_8X16);
            if(flags & FWD1)
                ff_cavs_mv(h,MV_FWD_X1,MV_FWD_C2,MV_PRED_TOPRIGHT,BLK_8X16,1);
            if(flags & SYM1)
                mv_pred_sym(h, &h->mv[MV_FWD_X1], BLK_8X16);
            if(flags & BWD0)
                ff_cavs_mv(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT, BLK_8X16, 0);
            if(flags & BWD1)
                ff_cavs_mv(h,MV_BWD_X1,MV_BWD_C2,MV_PRED_TOPRIGHT,BLK_8X16,0);
        }
    }
    ff_cavs_inter(h, mb_type);
    set_intra_mode_default(h);
    if(mb_type != B_SKIP)
        decode_residual_inter(h);
    ff_cavs_filter(h,mb_type);
}

/*****************************************************************************
 *
 * slice level
 *
 ****************************************************************************/

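/* Slice start codes (0x00..0xAF) carry the starting macroblock row of the
 * slice in the start code value itself. */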
static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
    if(h->stc > 0xAF)
        av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc);

    if (h->stc >= h->mb_height)
        return -1;

    h->mby = h->stc;
    h->mbidx = h->mby*h->mb_width;

    /* mark top macroblocks as unavailable */
    h->flags &= ~(B_AVAIL|C_AVAIL);
    if((h->mby == 0) && (!h->qp_fixed)){
        h->qp_fixed = get_bits1(gb);
        h->qp = get_bits(gb,6);
    }
    /* inter frame or second slice can have weighting params */
    if((h->pic_type != AV_PICTURE_TYPE_I) ||
       (!h->pic_structure && h->mby >= h->mb_width/2))
        if(get_bits1(gb)) { //slice_weighting_flag
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "weighted prediction not yet supported\n");
        }
    return 0;
}

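/* At the start of each macroblock row, look ahead for a slice start code:
 * skip to the next byte boundary (allowing one 0x80 stuffing byte) and, if a
 * 00 00 01 prefix follows, consume it and parse the slice header. */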
static inline int check_for_slice(AVSContext *h) {
    GetBitContext *gb = &h->s.gb;
    int align;

    if(h->mbx)
        return 0;
    align = (-get_bits_count(gb)) & 7;
    /* check for stuffing byte */
    if(!align && (show_bits(gb,8) == 0x80))
        align = 8;
    if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
        skip_bits_long(gb,24+align);
        h->stc = get_bits(gb,8);
        if (h->stc >= h->mb_height)
            return 0;
        decode_slice_header(h,gb);
        return 1;
    }
    return 0;
}

/*****************************************************************************
 *
 * frame level
 *
 ****************************************************************************/

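/* Decode one picture: parse the picture header, then all macroblocks.  The
 * temporal distances h->dist[] (POC differences modulo 512) between the
 * current picture and the two reference pictures are turned into the scaling
 * factors scale_den (512/dist), sym_factor (for symmetric B modes) and
 * direct_den (16384/dist, for direct modes).  Decoded I/P pictures are
 * shifted into the two-entry DPB at the end; B pictures are not stored. */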
static int decode_pic(AVSContext *h) {
    MpegEncContext *s = &h->s;
    int skip_count = -1;
    enum cavs_mb mb_type;

    if (!s->context_initialized) {
        s->avctx->idct_algo = FF_IDCT_CAVS;
        if (ff_MPV_common_init(s) < 0)
            return -1;
        ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
    }
    skip_bits(&s->gb,16);//bbv_dwlay
    if(h->stc == PIC_PB_START_CODE) {
        h->pic_type = get_bits(&s->gb,2) + AV_PICTURE_TYPE_I;
        if(h->pic_type > AV_PICTURE_TYPE_B) {
            av_log(s->avctx, AV_LOG_ERROR, "illegal picture type\n");
            return -1;
        }
        /* make sure we have the reference frames we need */
        if(!h->DPB[0].f.data[0] ||
          (!h->DPB[1].f.data[0] && h->pic_type == AV_PICTURE_TYPE_B))
            return -1;
    } else {
        h->pic_type = AV_PICTURE_TYPE_I;
        if(get_bits1(&s->gb))
            skip_bits(&s->gb,24);//time_code
        /* old sample clips were all progressive and no low_delay,
           bump stream revision if detected otherwise */
        if (s->low_delay || !(show_bits(&s->gb,9) & 1))
            h->stream_revision = 1;
        /* similarly test top_field_first and repeat_first_field */
        else if(show_bits(&s->gb,11) & 3)
            h->stream_revision = 1;
        if(h->stream_revision > 0)
            skip_bits(&s->gb,1); //marker_bit
    }
    /* release last B frame */
    if(h->picture.f.data[0])
        s->avctx->release_buffer(s->avctx, &h->picture.f);

    s->avctx->get_buffer(s->avctx, &h->picture.f);
    ff_cavs_init_pic(h);
    h->picture.poc = get_bits(&s->gb,8)*2;

    /* get temporal distances and MV scaling factors */
    if(h->pic_type != AV_PICTURE_TYPE_B) {
        h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
    } else {
        h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
    }
    h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
    h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
    h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
    if(h->pic_type == AV_PICTURE_TYPE_B) {
        h->sym_factor = h->dist[0]*h->scale_den[1];
    } else {
        h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
        h->direct_den[1] = h->dist[1] ? 16384/h->dist[1] : 0;
    }
    if(s->low_delay)
        get_ue_golomb(&s->gb); //bbv_check_times
    h->progressive = get_bits1(&s->gb);
    h->pic_structure = 1;
    if(!h->progressive)
        h->pic_structure = get_bits1(&s->gb);
    if(!h->pic_structure && h->stc == PIC_PB_START_CODE)
        skip_bits1(&s->gb); //advanced_pred_mode_disable
    skip_bits1(&s->gb); //top_field_first
    skip_bits1(&s->gb); //repeat_first_field
    h->qp_fixed = get_bits1(&s->gb);
    h->qp = get_bits(&s->gb,6);
    if(h->pic_type == AV_PICTURE_TYPE_I) {
        if(!h->progressive && !h->pic_structure)
            skip_bits1(&s->gb);//what is this?
        skip_bits(&s->gb,4); //reserved bits
    } else {
        if(!(h->pic_type == AV_PICTURE_TYPE_B && h->pic_structure == 1))
            h->ref_flag = get_bits1(&s->gb);
        skip_bits(&s->gb,4); //reserved bits
        h->skip_mode_flag = get_bits1(&s->gb);
    }
    h->loop_filter_disable = get_bits1(&s->gb);
    if(!h->loop_filter_disable && get_bits1(&s->gb)) {
        h->alpha_offset = get_se_golomb(&s->gb);
        h->beta_offset = get_se_golomb(&s->gb);
    } else {
        h->alpha_offset = h->beta_offset = 0;
    }
    if(h->pic_type == AV_PICTURE_TYPE_I) {
        do {
            check_for_slice(h);
            decode_mb_i(h, 0);
        } while(ff_cavs_next_mb(h));
    } else if(h->pic_type == AV_PICTURE_TYPE_P) {
        do {
            if(check_for_slice(h))
                skip_count = -1;
            if(h->skip_mode_flag && (skip_count < 0))
                skip_count = get_ue_golomb(&s->gb);
            if(h->skip_mode_flag && skip_count--) {
                decode_mb_p(h,P_SKIP);
            } else {
                mb_type = get_ue_golomb(&s->gb) + P_SKIP + h->skip_mode_flag;
                if(mb_type > P_8X8)
                    decode_mb_i(h, mb_type - P_8X8 - 1);
                else
                    decode_mb_p(h,mb_type);
            }
        } while(ff_cavs_next_mb(h));
    } else { /* AV_PICTURE_TYPE_B */
        do {
            if(check_for_slice(h))
                skip_count = -1;
            if(h->skip_mode_flag && (skip_count < 0))
                skip_count = get_ue_golomb(&s->gb);
            if(h->skip_mode_flag && skip_count--) {
                decode_mb_b(h,B_SKIP);
            } else {
                mb_type = get_ue_golomb(&s->gb) + B_SKIP + h->skip_mode_flag;
                if(mb_type > B_8X8)
                    decode_mb_i(h, mb_type - B_8X8 - 1);
                else
                    decode_mb_b(h,mb_type);
            }
        } while(ff_cavs_next_mb(h));
    }
    if(h->pic_type != AV_PICTURE_TYPE_B) {
        if(h->DPB[1].f.data[0])
            s->avctx->release_buffer(s->avctx, &h->DPB[1].f);
        h->DPB[1] = h->DPB[0];
        h->DPB[0] = h->picture;
        memset(&h->picture,0,sizeof(Picture));
    }
    return 0;
}

/*****************************************************************************
 *
 * headers and interface
 *
 ****************************************************************************/

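/* Sequence header: profile and level, 14-bit width and height, chroma format,
 * sample precision, aspect ratio, a frame rate code indexing
 * avpriv_frame_rate_tab, the bit rate fields and the low_delay flag. */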
static int decode_seq_header(AVSContext *h) {
    MpegEncContext *s = &h->s;
    int frame_rate_code;
    int width, height;

    h->profile = get_bits(&s->gb,8);
    h->level = get_bits(&s->gb,8);
    skip_bits1(&s->gb); //progressive sequence

    width = get_bits(&s->gb,14);
    height = get_bits(&s->gb,14);
    if ((s->width || s->height) && (s->width != width || s->height != height)) {
        av_log_missing_feature(s, "Width/height changing in CAVS is", 0);
        return -1;
    }
    if (width <= 0 || height <= 0) {
        av_log(s, AV_LOG_ERROR, "Dimensions invalid\n");
        return AVERROR_INVALIDDATA;
    }
    s->width = width;
    s->height = height;

    skip_bits(&s->gb,2); //chroma format
    skip_bits(&s->gb,3); //sample_precision
    h->aspect_ratio = get_bits(&s->gb,4);
    frame_rate_code = get_bits(&s->gb,4);
    skip_bits(&s->gb,18);//bit_rate_lower
    skip_bits1(&s->gb); //marker_bit
    skip_bits(&s->gb,12);//bit_rate_upper
    s->low_delay = get_bits1(&s->gb);
    h->mb_width = (s->width + 15) >> 4;
    h->mb_height = (s->height + 15) >> 4;
    h->s.avctx->time_base.den = avpriv_frame_rate_tab[frame_rate_code].num;
    h->s.avctx->time_base.num = avpriv_frame_rate_tab[frame_rate_code].den;
    h->s.avctx->width = s->width;
    h->s.avctx->height = s->height;
    if(!h->top_qp)
        ff_cavs_init_top_lines(h);
    return 0;
}

static void cavs_flush(AVCodecContext * avctx) {
    AVSContext *h = avctx->priv_data;
    h->got_keyframe = 0;
}

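/* Packet entry point: scan for start codes and dispatch on them.  Decoding
 * only begins once an I-picture start code has been seen (got_keyframe) and
 * a sequence header has initialized the context (top_qp).  After an I or P
 * picture the previous reference from DPB[1] is output, so display lags
 * decoding by one frame, while B pictures are output immediately; the last
 * buffered frame is flushed on an empty packet when low_delay is not set. */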
static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
                             AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AVSContext *h = avctx->priv_data;
    MpegEncContext *s = &h->s;
    int input_size;
    const uint8_t *buf_end;
    const uint8_t *buf_ptr;
    AVFrame *picture = data;
    uint32_t stc = -1;

    s->avctx = avctx;

    if (buf_size == 0) {
        if (!s->low_delay && h->DPB[0].f.data[0]) {
            *data_size = sizeof(AVPicture);
            *picture = h->DPB[0].f;
            memset(&h->DPB[0], 0, sizeof(h->DPB[0]));
        }
        return 0;
    }

    buf_ptr = buf;
    buf_end = buf + buf_size;
    for(;;) {
        buf_ptr = avpriv_mpv_find_start_code(buf_ptr,buf_end, &stc);
        if((stc & 0xFFFFFE00) || buf_ptr == buf_end)
            return FFMAX(0, buf_ptr - buf - s->parse_context.last_index);
        input_size = (buf_end - buf_ptr)*8;
        switch(stc) {
        case CAVS_START_CODE:
            init_get_bits(&s->gb, buf_ptr, input_size);
            decode_seq_header(h);
            break;
        case PIC_I_START_CODE:
            if(!h->got_keyframe) {
                if(h->DPB[0].f.data[0])
                    avctx->release_buffer(avctx, &h->DPB[0].f);
                if(h->DPB[1].f.data[0])
                    avctx->release_buffer(avctx, &h->DPB[1].f);
                h->got_keyframe = 1;
            }
        case PIC_PB_START_CODE:
            *data_size = 0;
            if(!h->got_keyframe)
                break;
            if(!h->top_qp)
                break;
            init_get_bits(&s->gb, buf_ptr, input_size);
            h->stc = stc;
            if(decode_pic(h))
                break;
            *data_size = sizeof(AVPicture);
            if(h->pic_type != AV_PICTURE_TYPE_B) {
                if(h->DPB[1].f.data[0]) {
                    *picture = h->DPB[1].f;
                } else {
                    *data_size = 0;
                }
            } else
                *picture = h->picture.f;
            break;
        case EXT_START_CODE:
            //mpeg_decode_extension(avctx,buf_ptr, input_size);
            break;
        case USER_START_CODE:
            //mpeg_decode_user_data(avctx,buf_ptr, input_size);
            break;
        default:
            if (stc <= SLICE_MAX_START_CODE) {
                init_get_bits(&s->gb, buf_ptr, input_size);
                decode_slice_header(h, &s->gb);
            }
            break;
        }
    }
}

AVCodec ff_cavs_decoder = {
    .name = "cavs",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_CAVS,
    .priv_data_size = sizeof(AVSContext),
    .init = ff_cavs_init,
    .close = ff_cavs_end,
    .decode = cavs_decode_frame,
    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .flush = cavs_flush,
    .long_name = NULL_IF_CONFIG_SMALL("Chinese AVS (Audio Video Standard) (AVS1-P2, JiZhun profile)"),
};