/*
 * H.26L/H.264/AVC/JVT/14496-10/... cavlc bitstream decoding
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 cavlc bitstream decoding.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#define CABAC 0

#include "internal.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"
#include "h264data.h" // FIXME FIXME FIXME
#include "h264_mvpred.h"
#include "golomb.h"

//#undef NDEBUG
#include <assert.h>
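
/* CBP mappings used when the macroblock carries no chroma coefficients
 * (monochrome or separate colour plane coding): the Exp-Golomb
 * coded_block_pattern index maps to a luma-only 4-bit CBP value. */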
static const uint8_t golomb_to_inter_cbp_gray[16]={
 0, 1, 2, 4, 8, 3, 5,10,12,15, 7,11,13,14, 6, 9,
};

static const uint8_t golomb_to_intra4x4_cbp_gray[16]={
15, 0, 7,11,13,14, 3, 5,10,12, 1, 2, 4, 8, 6, 9,
};
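
/* coeff_token code tables are laid out as four columns (trailing_ones 0..3)
 * per total_coeff row, so a decoded VLC symbol packs total_coeff in its
 * upper bits and trailing_ones in its two lowest bits. */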
static const uint8_t chroma_dc_coeff_token_len[4*5]={
 2, 0, 0, 0,
 6, 1, 0, 0,
 6, 6, 3, 0,
 6, 7, 7, 6,
 6, 8, 8, 7,
};

static const uint8_t chroma_dc_coeff_token_bits[4*5]={
 1, 0, 0, 0,
 7, 1, 0, 0,
 4, 6, 1, 0,
 3, 3, 2, 5,
 2, 3, 2, 0,
};

static const uint8_t coeff_token_len[4][4*17]={
{
     1, 0, 0, 0,
     6, 2, 0, 0, 8, 6, 3, 0, 9, 8, 7, 5, 10, 9, 8, 6,
    11,10, 9, 7, 13,11,10, 8, 13,13,11, 9, 13,13,13,10,
    14,14,13,11, 14,14,14,13, 15,15,14,14, 15,15,15,14,
    16,15,15,15, 16,16,16,15, 16,16,16,16, 16,16,16,16,
},
{
     2, 0, 0, 0,
     6, 2, 0, 0, 6, 5, 3, 0, 7, 6, 6, 4, 8, 6, 6, 4,
     8, 7, 7, 5, 9, 8, 8, 6, 11, 9, 9, 6, 11,11,11, 7,
    12,11,11, 9, 12,12,12,11, 12,12,12,11, 13,13,13,12,
    13,13,13,13, 13,14,13,13, 14,14,14,13, 14,14,14,14,
},
{
     4, 0, 0, 0,
     6, 4, 0, 0, 6, 5, 4, 0, 6, 5, 5, 4, 7, 5, 5, 4,
     7, 5, 5, 4, 7, 6, 6, 4, 7, 6, 6, 4, 8, 7, 7, 5,
     8, 8, 7, 6, 9, 8, 8, 7, 9, 9, 8, 8, 9, 9, 9, 8,
    10, 9, 9, 9, 10,10,10,10, 10,10,10,10, 10,10,10,10,
},
{
     6, 0, 0, 0,
     6, 6, 0, 0, 6, 6, 6, 0, 6, 6, 6, 6, 6, 6, 6, 6,
     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
}
};

static const uint8_t coeff_token_bits[4][4*17]={
{
     1, 0, 0, 0,
     5, 1, 0, 0, 7, 4, 1, 0, 7, 6, 5, 3, 7, 6, 5, 3,
     7, 6, 5, 4, 15, 6, 5, 4, 11,14, 5, 4, 8,10,13, 4,
    15,14, 9, 4, 11,10,13,12, 15,14, 9,12, 11,10,13, 8,
    15, 1, 9,12, 11,14,13, 8, 7,10, 9,12, 4, 6, 5, 8,
},
{
     3, 0, 0, 0,
    11, 2, 0, 0, 7, 7, 3, 0, 7,10, 9, 5, 7, 6, 5, 4,
     4, 6, 5, 6, 7, 6, 5, 8, 15, 6, 5, 4, 11,14,13, 4,
    15,10, 9, 4, 11,14,13,12, 8,10, 9, 8, 15,14,13,12,
    11,10, 9,12, 7,11, 6, 8, 9, 8,10, 1, 7, 6, 5, 4,
},
{
    15, 0, 0, 0,
    15,14, 0, 0, 11,15,13, 0, 8,12,14,12, 15,10,11,11,
    11, 8, 9,10, 9,14,13, 9, 8,10, 9, 8, 15,14,13,13,
    11,14,10,12, 15,10,13,12, 11,14, 9,12, 8,10,13, 8,
    13, 7, 9,12, 9,12,11,10, 5, 8, 7, 6, 1, 4, 3, 2,
},
{
     3, 0, 0, 0,
     0, 1, 0, 0, 4, 5, 6, 0, 8, 9,10,11, 12,13,14,15,
    16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31,
    32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47,
    48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63,
}
};

static const uint8_t total_zeros_len[16][16]= {
    {1,3,3,4,4,5,5,6,6,7,7,8,8,9,9,9},
    {3,3,3,3,3,4,4,4,4,5,5,6,6,6,6},
    {4,3,3,3,4,4,3,3,4,5,5,6,5,6},
    {5,3,4,4,3,3,3,4,3,4,5,5,5},
    {4,4,4,3,3,3,3,3,4,5,4,5},
    {6,5,3,3,3,3,3,3,4,3,6},
    {6,5,3,3,3,2,3,4,3,6},
    {6,4,5,3,2,2,3,3,6},
    {6,6,4,2,2,3,2,5},
    {5,5,3,2,2,2,4},
    {4,4,3,3,1,3},
    {4,4,2,1,3},
    {3,3,1,2},
    {2,2,1},
    {1,1},
};

static const uint8_t total_zeros_bits[16][16]= {
    {1,3,2,3,2,3,2,3,2,3,2,3,2,3,2,1},
    {7,6,5,4,3,5,4,3,2,3,2,3,2,1,0},
    {5,7,6,5,4,3,4,3,2,3,2,1,1,0},
    {3,7,5,4,6,5,4,3,3,2,2,1,0},
    {5,4,3,7,6,5,4,3,2,1,1,0},
    {1,1,7,6,5,4,3,2,1,1,0},
    {1,1,5,4,3,3,2,1,1,0},
    {1,1,1,3,3,2,2,1,0},
    {1,0,1,3,2,1,1,1},
    {1,0,1,3,2,1,1},
    {0,1,1,2,1,3},
    {0,1,1,1,1},
    {0,1,1,1},
    {0,1,1},
    {0,1},
};

static const uint8_t chroma_dc_total_zeros_len[3][4]= {
    { 1, 2, 3, 3,},
    { 1, 2, 2, 0,},
    { 1, 1, 0, 0,},
};

static const uint8_t chroma_dc_total_zeros_bits[3][4]= {
    { 1, 1, 1, 0,},
    { 1, 1, 0, 0,},
    { 1, 0, 0, 0,},
};

static const uint8_t run_len[7][16]={
    {1,1},
    {1,2,2},
    {2,2,2,2},
    {2,2,2,3,3},
    {2,2,3,3,3,3},
    {2,3,3,3,3,3,3},
    {3,3,3,3,3,3,3,4,5,6,7,8,9,10,11},
};

static const uint8_t run_bits[7][16]={
    {1,0},
    {1,1,0},
    {3,2,1,0},
    {3,2,1,1,0},
    {3,2,3,2,1,0},
    {3,0,1,3,2,5,4},
    {7,6,5,4,3,2,1,1,1,1,1,1,1,1,1},
};
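
/* All VLCs below use statically preallocated tables (INIT_VLC_USE_NEW_STATIC);
 * each *_size constant must match the number of entries its table needs. */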
static VLC coeff_token_vlc[4];
static VLC_TYPE coeff_token_vlc_tables[520+332+280+256][2];
static const int coeff_token_vlc_tables_size[4]={520,332,280,256};

static VLC chroma_dc_coeff_token_vlc;
static VLC_TYPE chroma_dc_coeff_token_vlc_table[256][2];
static const int chroma_dc_coeff_token_vlc_table_size = 256;

static VLC total_zeros_vlc[15];
static VLC_TYPE total_zeros_vlc_tables[15][512][2];
static const int total_zeros_vlc_tables_size = 512;

static VLC chroma_dc_total_zeros_vlc[3];
static VLC_TYPE chroma_dc_total_zeros_vlc_tables[3][8][2];
static const int chroma_dc_total_zeros_vlc_tables_size = 8;

static VLC run_vlc[6];
static VLC_TYPE run_vlc_tables[6][8][2];
static const int run_vlc_tables_size = 8;

static VLC run7_vlc;
static VLC_TYPE run7_vlc_table[96][2];
static const int run7_vlc_table_size = 96;

#define LEVEL_TAB_BITS 8
static int8_t cavlc_level_tab[7][1<<LEVEL_TAB_BITS][2];

/**
 * gets the predicted number of non-zero coefficients.
 * @param n block index
 */
static inline int pred_non_zero_count(H264Context *h, int n){
    const int index8= scan8[n];
    const int left= h->non_zero_count_cache[index8 - 1];
    const int top = h->non_zero_count_cache[index8 - 8];
    int i= left + top;

    if(i<64) i= (i+1)>>1;

    tprintf(h->s.avctx, "pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);

    return i&31;
}
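
/**
 * Build a lookup table that maps the next LEVEL_TAB_BITS bits of the
 * bitstream directly to a decoded level and its length in bits, for each
 * suffix_length. Entries whose code does not fit in LEVEL_TAB_BITS store
 * prefix+100, which tells the caller to fall back to the slow path.
 */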
static av_cold void init_cavlc_level_tab(void){
    int suffix_length, mask;
    unsigned int i;

    for(suffix_length=0; suffix_length<7; suffix_length++){
        for(i=0; i<(1<<LEVEL_TAB_BITS); i++){
            int prefix= LEVEL_TAB_BITS - av_log2(2*i);
            int level_code= (prefix<<suffix_length) + (i>>(LEVEL_TAB_BITS-prefix-1-suffix_length)) - (1<<suffix_length);

            mask= -(level_code&1);
            level_code= (((2+level_code)>>1) ^ mask) - mask;
            if(prefix + 1 + suffix_length <= LEVEL_TAB_BITS){
                cavlc_level_tab[suffix_length][i][0]= level_code;
                cavlc_level_tab[suffix_length][i][1]= prefix + 1 + suffix_length;
            }else if(prefix + 1 <= LEVEL_TAB_BITS){
                cavlc_level_tab[suffix_length][i][0]= prefix+100;
                cavlc_level_tab[suffix_length][i][1]= prefix + 1;
            }else{
                cavlc_level_tab[suffix_length][i][0]= LEVEL_TAB_BITS+100;
                cavlc_level_tab[suffix_length][i][1]= LEVEL_TAB_BITS;
            }
        }
    }
}
av_cold void ff_h264_decode_init_vlc(void){
    static int done = 0;

    if (!done) {
        int i;
        int offset;
        done = 1;

        chroma_dc_coeff_token_vlc.table = chroma_dc_coeff_token_vlc_table;
        chroma_dc_coeff_token_vlc.table_allocated = chroma_dc_coeff_token_vlc_table_size;
        init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
                 &chroma_dc_coeff_token_len [0], 1, 1,
                 &chroma_dc_coeff_token_bits[0], 1, 1,
                 INIT_VLC_USE_NEW_STATIC);

        offset = 0;
        for(i=0; i<4; i++){
            coeff_token_vlc[i].table = coeff_token_vlc_tables+offset;
            coeff_token_vlc[i].table_allocated = coeff_token_vlc_tables_size[i];
            init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
                     &coeff_token_len [i][0], 1, 1,
                     &coeff_token_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
            offset += coeff_token_vlc_tables_size[i];
        }
        /*
         * This is a one time safety check to make sure that
         * the packed static coeff_token_vlc table sizes
         * were initialized correctly.
         */
        assert(offset == FF_ARRAY_ELEMS(coeff_token_vlc_tables));

        for(i=0; i<3; i++){
            chroma_dc_total_zeros_vlc[i].table = chroma_dc_total_zeros_vlc_tables[i];
            chroma_dc_total_zeros_vlc[i].table_allocated = chroma_dc_total_zeros_vlc_tables_size;
            init_vlc(&chroma_dc_total_zeros_vlc[i],
                     CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
                     &chroma_dc_total_zeros_len [i][0], 1, 1,
                     &chroma_dc_total_zeros_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
        }
        for(i=0; i<15; i++){
            total_zeros_vlc[i].table = total_zeros_vlc_tables[i];
            total_zeros_vlc[i].table_allocated = total_zeros_vlc_tables_size;
            init_vlc(&total_zeros_vlc[i],
                     TOTAL_ZEROS_VLC_BITS, 16,
                     &total_zeros_len [i][0], 1, 1,
                     &total_zeros_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
        }
        for(i=0; i<6; i++){
            run_vlc[i].table = run_vlc_tables[i];
            run_vlc[i].table_allocated = run_vlc_tables_size;
            init_vlc(&run_vlc[i],
                     RUN_VLC_BITS, 7,
                     &run_len [i][0], 1, 1,
                     &run_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
        }
        run7_vlc.table = run7_vlc_table;
        run7_vlc.table_allocated = run7_vlc_table_size;
        init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
                 &run_len [6][0], 1, 1,
                 &run_bits[6][0], 1, 1,
                 INIT_VLC_USE_NEW_STATIC);

        init_cavlc_level_tab();
    }
}
/**
 * Get the level prefix: the number of consecutive zero bits preceding the
 * next set bit in the bitstream.
 */
static inline int get_level_prefix(GetBitContext *gb){
    unsigned int buf;
    int log;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf=GET_CACHE(re, gb);

    log= 32 - av_log2(buf);
#ifdef TRACE
    print_bin(buf>>(32-log), log);
    av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d lpr @%5d in %s get_level_prefix\n", buf>>(32-log), log, log-1, get_bits_count(gb), __FILE__);
#endif

    LAST_SKIP_BITS(re, gb, log);
    CLOSE_READER(re, gb);

    return log-1;
}
/**
 * decodes a residual block.
 * @param n block index
 * @param scantable scan table mapping coefficient index to block position
 * @param max_coeff number of coefficients in the block
 * @return <0 if an error occurred
 */
static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff){
    MpegEncContext * const s = &h->s;
    static const int coeff_token_table_index[17]= {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
    int level[16];
    int zeros_left, coeff_token, total_coeff, i, trailing_ones, run_before;

    //FIXME put trailing_ones into the context

    if(max_coeff <= 8){
        coeff_token= get_vlc2(gb, chroma_dc_coeff_token_vlc.table, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 1);
        total_coeff= coeff_token>>2;
    }else{
        if(n >= LUMA_DC_BLOCK_INDEX){
            total_coeff= pred_non_zero_count(h, (n - LUMA_DC_BLOCK_INDEX)*16);
            coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
            total_coeff= coeff_token>>2;
        }else{
            total_coeff= pred_non_zero_count(h, n);
            coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
            total_coeff= coeff_token>>2;
        }
    }
    h->non_zero_count_cache[ scan8[n] ]= total_coeff;

    //FIXME set last_non_zero?
    if(total_coeff==0)
        return 0;
    if(total_coeff > (unsigned)max_coeff) {
        av_log(h->s.avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", s->mb_x, s->mb_y, total_coeff);
        return -1;
    }

    trailing_ones= coeff_token&3;
    tprintf(h->s.avctx, "trailing:%d, total:%d\n", trailing_ones, total_coeff);
    assert(total_coeff<=16);

    i = show_bits(gb, 3);
    skip_bits(gb, trailing_ones);
    level[0] = 1-((i&4)>>1);
    level[1] = 1-((i&2)   );
    level[2] = 1-((i&1)<<1);

    if(trailing_ones<total_coeff) {
        int mask, prefix;
        int suffix_length = total_coeff > 10 & trailing_ones < 3;
        int bitsi= show_bits(gb, LEVEL_TAB_BITS);
        int level_code= cavlc_level_tab[suffix_length][bitsi][0];

        skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
        if(level_code >= 100){
            prefix= level_code - 100;
            if(prefix == LEVEL_TAB_BITS)
                prefix += get_level_prefix(gb);

            //first coefficient has suffix_length equal to 0 or 1
            if(prefix<14){ //FIXME try to build a large unified VLC table for all this
                if(suffix_length)
                    level_code= (prefix<<1) + get_bits1(gb); //part
                else
                    level_code= prefix; //part
            }else if(prefix==14){
                if(suffix_length)
                    level_code= (prefix<<1) + get_bits1(gb); //part
                else
                    level_code= prefix + get_bits(gb, 4); //part
            }else{
                level_code= 30 + get_bits(gb, prefix-3); //part
                if(prefix>=16){
                    if(prefix > 25+3){
                        av_log(h->s.avctx, AV_LOG_ERROR, "Invalid level prefix\n");
                        return -1;
                    }
                    level_code += (1<<(prefix-3))-4096;
                }
            }

            if(trailing_ones < 3) level_code += 2;

            suffix_length = 2;
            mask= -(level_code&1);
            level[trailing_ones]= (((2+level_code)>>1) ^ mask) - mask;
        }else{
            level_code += ((level_code>>31)|1) & -(trailing_ones < 3);

            suffix_length = 1 + (level_code + 3U > 6U);
            level[trailing_ones]= level_code;
        }

        //remaining coefficients have suffix_length > 0
        for(i=trailing_ones+1;i<total_coeff;i++) {
            static const unsigned int suffix_limit[7] = {0,3,6,12,24,48,INT_MAX };
            int bitsi= show_bits(gb, LEVEL_TAB_BITS);
            level_code= cavlc_level_tab[suffix_length][bitsi][0];

            skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
            if(level_code >= 100){
                prefix= level_code - 100;
                if(prefix == LEVEL_TAB_BITS){
                    prefix += get_level_prefix(gb);
                }
                if(prefix<15){
                    level_code = (prefix<<suffix_length) + get_bits(gb, suffix_length);
                }else{
                    level_code = (15<<suffix_length) + get_bits(gb, prefix-3);
                    if(prefix>=16)
                        level_code += (1<<(prefix-3))-4096;
                }
                mask= -(level_code&1);
                level_code= (((2+level_code)>>1) ^ mask) - mask;
            }
            level[i]= level_code;
            suffix_length+= suffix_limit[suffix_length] + level_code > 2U*suffix_limit[suffix_length];
        }
    }

    if(total_coeff == max_coeff)
        zeros_left=0;
    else{
        /* FIXME: we don't actually support 4:2:2 yet. */
        if(max_coeff <= 8)
            zeros_left= get_vlc2(gb, (chroma_dc_total_zeros_vlc-1)[ total_coeff ].table, CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 1);
        else
            zeros_left= get_vlc2(gb, (total_zeros_vlc-1)[ total_coeff ].table, TOTAL_ZEROS_VLC_BITS, 1);
    }
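
    /* Write the decoded levels back into the block in reverse scan order,
     * reading a run_before VLC between coefficients while zeros remain.
     * DC blocks (n >= LUMA_DC_BLOCK_INDEX) are stored as-is; AC blocks are
     * dequantized with qmul. Instantiated for 16-bit and 32-bit samples. */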
#define STORE_BLOCK(type) \
    scantable += zeros_left + total_coeff - 1; \
    if(n >= LUMA_DC_BLOCK_INDEX){ \
        ((type*)block)[*scantable] = level[0]; \
        for(i=1;i<total_coeff && zeros_left > 0;i++) { \
            if(zeros_left < 7) \
                run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1); \
            else \
                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2); \
            zeros_left -= run_before; \
            scantable -= 1 + run_before; \
            ((type*)block)[*scantable]= level[i]; \
        } \
        for(;i<total_coeff;i++) { \
            scantable--; \
            ((type*)block)[*scantable]= level[i]; \
        } \
    }else{ \
        ((type*)block)[*scantable] = ((int)(level[0] * qmul[*scantable] + 32))>>6; \
        for(i=1;i<total_coeff && zeros_left > 0;i++) { \
            if(zeros_left < 7) \
                run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1); \
            else \
                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2); \
            zeros_left -= run_before; \
            scantable -= 1 + run_before; \
            ((type*)block)[*scantable]= ((int)(level[i] * qmul[*scantable] + 32))>>6; \
        } \
        for(;i<total_coeff;i++) { \
            scantable--; \
            ((type*)block)[*scantable]= ((int)(level[i] * qmul[*scantable] + 32))>>6; \
        } \
    }

    if (h->pixel_shift) {
        STORE_BLOCK(int32_t)
    } else {
        STORE_BLOCK(int16_t)
    }

    if(zeros_left<0){
        av_log(h->s.avctx, AV_LOG_ERROR, "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y);
        return -1;
    }

    return 0;
}
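
/**
 * Decode the luma residual of one colour plane (p) of the current macroblock:
 * the DC + AC blocks of an intra16x16 macroblock, or the 8x8/4x4 transform
 * blocks selected by cbp otherwise.
 * @return the per-8x8-block CBP that actually contains coefficients, or <0 on error
 */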
static av_always_inline int decode_luma_residual(H264Context *h, GetBitContext *gb, const uint8_t *scan, const uint8_t *scan8x8, int pixel_shift, int mb_type, int cbp, int p){
    int i4x4, i8x8;
    MpegEncContext * const s = &h->s;
    int qscale = p == 0 ? s->qscale : h->chroma_qp[p-1];
    if(IS_INTRA16x16(mb_type)){
        AV_ZERO128(h->mb_luma_dc[p]+0);
        AV_ZERO128(h->mb_luma_dc[p]+8);
        AV_ZERO128(h->mb_luma_dc[p]+16);
        AV_ZERO128(h->mb_luma_dc[p]+24);
        if( decode_residual(h, h->intra_gb_ptr, h->mb_luma_dc[p], LUMA_DC_BLOCK_INDEX+p, scan, NULL, 16) < 0){
            return -1; //FIXME continue if partitioned and other return -1 too
        }

        assert((cbp&15) == 0 || (cbp&15) == 15);

        if(cbp&15){
            for(i8x8=0; i8x8<4; i8x8++){
                for(i4x4=0; i4x4<4; i4x4++){
                    const int index= i4x4 + 4*i8x8 + p*16;
                    if( decode_residual(h, h->intra_gb_ptr, h->mb + (16*index << pixel_shift),
                                        index, scan + 1, h->dequant4_coeff[p][qscale], 15) < 0 ){
                        return -1;
                    }
                }
            }
            return 0xf;
        }else{
            fill_rectangle(&h->non_zero_count_cache[scan8[p*16]], 4, 4, 8, 0, 1);
            return 0;
        }
    }else{
        int cqm = (IS_INTRA( mb_type ) ? 0:3)+p;
        /* For CAVLC 4:4:4, we need to keep track of the luma 8x8 CBP for deblocking nnz purposes. */
        int new_cbp = 0;
        for(i8x8=0; i8x8<4; i8x8++){
            if(cbp & (1<<i8x8)){
                if(IS_8x8DCT(mb_type)){
                    DCTELEM *buf = &h->mb[64*i8x8+256*p << pixel_shift];
                    uint8_t *nnz;
                    for(i4x4=0; i4x4<4; i4x4++){
                        const int index= i4x4 + 4*i8x8 + p*16;
                        if( decode_residual(h, gb, buf, index, scan8x8+16*i4x4,
                                            h->dequant8_coeff[cqm][qscale], 16) < 0 )
                            return -1;
                    }
                    nnz= &h->non_zero_count_cache[ scan8[4*i8x8+p*16] ];
                    nnz[0] += nnz[1] + nnz[8] + nnz[9];
                    new_cbp |= !!nnz[0] << i8x8;
                }else{
                    for(i4x4=0; i4x4<4; i4x4++){
                        const int index= i4x4 + 4*i8x8 + p*16;
                        if( decode_residual(h, gb, h->mb + (16*index << pixel_shift), index,
                                            scan, h->dequant4_coeff[cqm][qscale], 16) < 0 ){
                            return -1;
                        }
                        new_cbp |= h->non_zero_count_cache[ scan8[index] ] << i8x8;
                    }
                }
            }else{
                uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8+p*16] ];
                nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0;
            }
        }
        return new_cbp;
    }
}
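
/**
 * Decode one macroblock of a CAVLC-coded slice: skip flags, mb_type,
 * prediction modes or motion vectors, coded_block_pattern, dquant and the
 * residual blocks.
 * @return 0 on success, <0 on bitstream errors
 */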
int ff_h264_decode_mb_cavlc(H264Context *h){
    MpegEncContext * const s = &h->s;
    int mb_xy;
    int partition_count;
    unsigned int mb_type, cbp;
    int dct8x8_allowed= h->pps.transform_8x8_mode;
    int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
    const int pixel_shift = h->pixel_shift;

    mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

    tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
    cbp = 0; /* avoid warning. FIXME: find a solution without slowing
                down the code */

    if(h->slice_type_nos != AV_PICTURE_TYPE_I){
        if(s->mb_skip_run==-1)
            s->mb_skip_run= get_ue_golomb(&s->gb);

        if (s->mb_skip_run--) {
            if(FRAME_MBAFF && (s->mb_y&1) == 0){
                if(s->mb_skip_run==0)
                    h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
            }
            decode_mb_skip(h);
            return 0;
        }
    }
    if(FRAME_MBAFF){
        if( (s->mb_y&1) == 0 )
            h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb);
    }

    h->prev_mb_skipped= 0;

    mb_type= get_ue_golomb(&s->gb);
    if(h->slice_type_nos == AV_PICTURE_TYPE_B){
        if(mb_type < 23){
            partition_count= b_mb_type_info[mb_type].partition_count;
            mb_type=         b_mb_type_info[mb_type].type;
        }else{
            mb_type -= 23;
            goto decode_intra_mb;
        }
    }else if(h->slice_type_nos == AV_PICTURE_TYPE_P){
        if(mb_type < 5){
            partition_count= p_mb_type_info[mb_type].partition_count;
            mb_type=         p_mb_type_info[mb_type].type;
        }else{
            mb_type -= 5;
            goto decode_intra_mb;
        }
    }else{
        assert(h->slice_type_nos == AV_PICTURE_TYPE_I);
        if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
            mb_type--;
decode_intra_mb:
        if(mb_type > 25){
            av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(h->slice_type), s->mb_x, s->mb_y);
            return -1;
        }
        partition_count=0;
        cbp= i_mb_type_info[mb_type].cbp;
        h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode;
        mb_type= i_mb_type_info[mb_type].type;
    }

    if(MB_FIELD)
        mb_type |= MB_TYPE_INTERLACED;

    h->slice_table[ mb_xy ]= h->slice_num;

    if(IS_INTRA_PCM(mb_type)){
        unsigned int x;
        static const uint16_t mb_sizes[4] = {256,384,512,768};
        const int mb_size = mb_sizes[h->sps.chroma_format_idc]*h->sps.bit_depth_luma >> 3;

        // We assume these blocks are very rare so we do not optimize them.
        align_get_bits(&s->gb);

        // The pixels are stored in the same order as levels in h->mb array.
        for(x=0; x < mb_size; x++){
            ((uint8_t*)h->mb)[x]= get_bits(&s->gb, 8);
        }

        // In deblocking, the quantizer is 0
        s->current_picture.qscale_table[mb_xy]= 0;
        // All coeffs are present
        memset(h->non_zero_count[mb_xy], 16, 48);

        s->current_picture.mb_type[mb_xy]= mb_type;
        return 0;
    }
    if(MB_MBAFF){
        h->ref_count[0] <<= 1;
        h->ref_count[1] <<= 1;
    }

    fill_decode_neighbors(h, mb_type);
    fill_decode_caches(h, mb_type);

    //mb_pred
    if(IS_INTRA(mb_type)){
        int pred_mode;
//        init_top_left_availability(h);
        if(IS_INTRA4x4(mb_type)){
            int i;
            int di = 1;
            if(dct8x8_allowed && get_bits1(&s->gb)){
                mb_type |= MB_TYPE_8x8DCT;
                di = 4;
            }

//            fill_intra4x4_pred_table(h);
            for(i=0; i<16; i+=di){
                int mode= pred_intra_mode(h, i);

                if(!get_bits1(&s->gb)){
                    const int rem_mode= get_bits(&s->gb, 3);
                    mode = rem_mode + (rem_mode >= mode);
                }

                if(di==4)
                    fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 );
                else
                    h->intra4x4_pred_mode_cache[ scan8[i] ] = mode;
            }
            ff_h264_write_back_intra_pred_mode(h);
            if( ff_h264_check_intra4x4_pred_mode(h) < 0)
                return -1;
        }else{
            h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode);
            if(h->intra16x16_pred_mode < 0)
                return -1;
        }
        if(decode_chroma){
            pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb));
            if(pred_mode < 0)
                return -1;
            h->chroma_pred_mode= pred_mode;
        } else {
            h->chroma_pred_mode = DC_128_PRED8x8;
        }
    }else if(partition_count==4){
        int i, j, sub_partition_count[4], list, ref[2][4];

        if(h->slice_type_nos == AV_PICTURE_TYPE_B){
            for(i=0; i<4; i++){
                h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
                if(h->sub_mb_type[i] >=13){
                    av_log(h->s.avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
                    return -1;
                }
                sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
                h->sub_mb_type[i]=      b_sub_mb_type_info[ h->sub_mb_type[i] ].type;
            }
            if( IS_DIRECT(h->sub_mb_type[0]|h->sub_mb_type[1]|h->sub_mb_type[2]|h->sub_mb_type[3])) {
                ff_h264_pred_direct_motion(h, &mb_type);
                h->ref_cache[0][scan8[4]] =
                h->ref_cache[1][scan8[4]] =
                h->ref_cache[0][scan8[12]] =
                h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
            }
        }else{
            assert(h->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ?
            for(i=0; i<4; i++){
                h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
                if(h->sub_mb_type[i] >=4){
                    av_log(h->s.avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y);
                    return -1;
                }
                sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
                h->sub_mb_type[i]=      p_sub_mb_type_info[ h->sub_mb_type[i] ].type;
            }
        }

        for(list=0; list<h->list_count; list++){
            int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list];
            for(i=0; i<4; i++){
                if(IS_DIRECT(h->sub_mb_type[i])) continue;
                if(IS_DIR(h->sub_mb_type[i], 0, list)){
                    unsigned int tmp;
                    if(ref_count == 1){
                        tmp= 0;
                    }else if(ref_count == 2){
                        tmp= get_bits1(&s->gb)^1;
                    }else{
                        tmp= get_ue_golomb_31(&s->gb);
                        if(tmp>=ref_count){
                            av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp);
                            return -1;
                        }
                    }
                    ref[list][i]= tmp;
                }else{
                    //FIXME
                    ref[list][i] = -1;
                }
            }
        }
        if(dct8x8_allowed)
            dct8x8_allowed = get_dct8x8_allowed(h);

        for(list=0; list<h->list_count; list++){
            for(i=0; i<4; i++){
                if(IS_DIRECT(h->sub_mb_type[i])) {
                    h->ref_cache[list][ scan8[4*i] ] = h->ref_cache[list][ scan8[4*i]+1 ];
                    continue;
                }
                h->ref_cache[list][ scan8[4*i]   ]=h->ref_cache[list][ scan8[4*i]+1 ]=
                h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];

                if(IS_DIR(h->sub_mb_type[i], 0, list)){
                    const int sub_mb_type= h->sub_mb_type[i];
                    const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
                    for(j=0; j<sub_partition_count[i]; j++){
                        int mx, my;
                        const int index= 4*i + block_width*j;
                        int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ];
                        pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my);
                        mx += get_se_golomb(&s->gb);
                        my += get_se_golomb(&s->gb);
                        tprintf(s->avctx, "final mv:%d %d\n", mx, my);

                        if(IS_SUB_8X8(sub_mb_type)){
                            mv_cache[ 1 ][0]=
                            mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
                            mv_cache[ 1 ][1]=
                            mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
                        }else if(IS_SUB_8X4(sub_mb_type)){
                            mv_cache[ 1 ][0]= mx;
                            mv_cache[ 1 ][1]= my;
                        }else if(IS_SUB_4X8(sub_mb_type)){
                            mv_cache[ 8 ][0]= mx;
                            mv_cache[ 8 ][1]= my;
                        }
                        mv_cache[ 0 ][0]= mx;
                        mv_cache[ 0 ][1]= my;
                    }
                }else{
                    uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0];
                    p[0] = p[1]=
                    p[8] = p[9]= 0;
                }
            }
        }
    }else if(IS_DIRECT(mb_type)){
        ff_h264_pred_direct_motion(h, &mb_type);
        dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
    }else{
        int list, mx, my, i;
        //FIXME we should set ref_idx_l? to 0 if we use that later ...
        if(IS_16X16(mb_type)){
            for(list=0; list<h->list_count; list++){
                unsigned int val;
                if(IS_DIR(mb_type, 0, list)){
                    if(h->ref_count[list]==1){
                        val= 0;
                    }else if(h->ref_count[list]==2){
                        val= get_bits1(&s->gb)^1;
                    }else{
                        val= get_ue_golomb_31(&s->gb);
                        if(val >= h->ref_count[list]){
                            av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
                            return -1;
                        }
                    }
                    fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1);
                }
            }
            for(list=0; list<h->list_count; list++){
                if(IS_DIR(mb_type, 0, list)){
                    pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my);
                    mx += get_se_golomb(&s->gb);
                    my += get_se_golomb(&s->gb);
                    tprintf(s->avctx, "final mv:%d %d\n", mx, my);

                    fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4);
                }
            }
        }
        else if(IS_16X8(mb_type)){
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){
                        if(h->ref_count[list] == 1){
                            val= 0;
                        }else if(h->ref_count[list] == 2){
                            val= get_bits1(&s->gb)^1;
                        }else{
                            val= get_ue_golomb_31(&s->gb);
                            if(val >= h->ref_count[list]){
                                av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
                                return -1;
                            }
                        }
                    }else
                        val= LIST_NOT_USED&0xFF;
                    fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1);
                }
            }
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){
                        pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my);
                        mx += get_se_golomb(&s->gb);
                        my += get_se_golomb(&s->gb);
                        tprintf(s->avctx, "final mv:%d %d\n", mx, my);

                        val= pack16to32(mx,my);
                    }else
                        val=0;
                    fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4);
                }
            }
        }else{
            assert(IS_8X16(mb_type));
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){ //FIXME optimize
                        if(h->ref_count[list]==1){
                            val= 0;
                        }else if(h->ref_count[list]==2){
                            val= get_bits1(&s->gb)^1;
                        }else{
                            val= get_ue_golomb_31(&s->gb);
                            if(val >= h->ref_count[list]){
                                av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
                                return -1;
                            }
                        }
                    }else
                        val= LIST_NOT_USED&0xFF;
                    fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1);
                }
            }
            for(list=0; list<h->list_count; list++){
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){
                        pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my);
                        mx += get_se_golomb(&s->gb);
                        my += get_se_golomb(&s->gb);
                        tprintf(s->avctx, "final mv:%d %d\n", mx, my);

                        val= pack16to32(mx,my);
                    }else
                        val=0;
                    fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4);
                }
            }
        }
    }
    if(IS_INTER(mb_type))
        write_back_motion(h, mb_type);

    if(!IS_INTRA16x16(mb_type)){
        cbp= get_ue_golomb(&s->gb);

        if(decode_chroma){
            if(cbp > 47){
                av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, s->mb_x, s->mb_y);
                return -1;
            }
            if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp[cbp];
            else                     cbp= golomb_to_inter_cbp   [cbp];
        }else{
            if(cbp > 15){
                av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, s->mb_x, s->mb_y);
                return -1;
            }
            if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp];
            else                     cbp= golomb_to_inter_cbp_gray[cbp];
        }
    }

    if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
        mb_type |= MB_TYPE_8x8DCT*get_bits1(&s->gb);
    }
    h->cbp=
    h->cbp_table[mb_xy]= cbp;
    s->current_picture.mb_type[mb_xy]= mb_type;

    if(cbp || IS_INTRA16x16(mb_type)){
        int i4x4, chroma_idx;
        int dquant;
        int ret;
        GetBitContext *gb= IS_INTRA(mb_type) ? h->intra_gb_ptr : h->inter_gb_ptr;
        const uint8_t *scan, *scan8x8;
        const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8);

        if(IS_INTERLACED(mb_type)){
            scan8x8= s->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
            scan= s->qscale ? h->field_scan : h->field_scan_q0;
        }else{
            scan8x8= s->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0;
            scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
        }

        dquant= get_se_golomb(&s->gb);

        s->qscale += dquant;

        if(((unsigned)s->qscale) > max_qp){
            if(s->qscale<0) s->qscale+= max_qp+1;
            else            s->qscale-= max_qp+1;
            if(((unsigned)s->qscale) > max_qp){
                av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y);
                return -1;
            }
        }

        h->chroma_qp[0]= get_chroma_qp(h, 0, s->qscale);
        h->chroma_qp[1]= get_chroma_qp(h, 1, s->qscale);

        if( (ret = decode_luma_residual(h, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 0)) < 0 ){
            return -1;
        }
        h->cbp_table[mb_xy] |= ret << 12;
        if(CHROMA444){
            if( decode_luma_residual(h, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 1) < 0 ){
                return -1;
            }
            if( decode_luma_residual(h, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 2) < 0 ){
                return -1;
            }
        } else {
            if(cbp&0x30){
                for(chroma_idx=0; chroma_idx<2; chroma_idx++)
                    if( decode_residual(h, gb, h->mb + ((256 + 16*16*chroma_idx) << pixel_shift), CHROMA_DC_BLOCK_INDEX+chroma_idx, chroma_dc_scan, NULL, 4) < 0){
                        return -1;
                    }
            }

            if(cbp&0x20){
                for(chroma_idx=0; chroma_idx<2; chroma_idx++){
                    const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][h->chroma_qp[chroma_idx]];
                    for(i4x4=0; i4x4<4; i4x4++){
                        const int index= 16 + 16*chroma_idx + i4x4;
                        if( decode_residual(h, gb, h->mb + (16*index << pixel_shift), index, scan + 1, qmul, 15) < 0){
                            return -1;
                        }
                    }
                }
            }else{
                fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
                fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
            }
        }
    }else{
        fill_rectangle(&h->non_zero_count_cache[scan8[ 0]], 4, 4, 8, 0, 1);
        fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
        fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
    }
    s->current_picture.qscale_table[mb_xy]= s->qscale;
    write_back_non_zero_count(h);

    if(MB_MBAFF){
        h->ref_count[0] >>= 1;
        h->ref_count[1] >>= 1;
    }

    return 0;
}