/*
 * H.26L/H.264/AVC/JVT/14496-10/... cavlc bitstream decoding
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 cavlc bitstream decoding.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#define CABAC(h) 0

#include "internal.h"
#include "avcodec.h"
#include "h264dec.h"
#include "h264_mvpred.h"
#include "h264data.h"
#include "golomb_legacy.h"
#include "mpegutils.h"

#include <assert.h>
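
/* coded_block_pattern mappings used when no chroma CBP is coded
 * (decode_chroma is false, i.e. monochrome or 4:4:4 content):
 * the ue(v) code index is translated to the 4-bit luma-only CBP. */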
static const uint8_t golomb_to_inter_cbp_gray[16]={
 0, 1, 2, 4, 8, 3, 5,10,12,15, 7,11,13,14, 6, 9,
};

static const uint8_t golomb_to_intra4x4_cbp_gray[16]={
15, 0, 7,11,13,14, 3, 5,10,12, 1, 2, 4, 8, 6, 9,
};
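
/* coeff_token code lengths and bit patterns. Each table is laid out as
 * [total_coeff][trailing_ones], i.e. the decoded VLC symbol is
 * (total_coeff << 2) | trailing_ones. */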
static const uint8_t chroma_dc_coeff_token_len[4*5]={
 2, 0, 0, 0,
 6, 1, 0, 0,
 6, 6, 3, 0,
 6, 7, 7, 6,
 6, 8, 8, 7,
};

static const uint8_t chroma_dc_coeff_token_bits[4*5]={
 1, 0, 0, 0,
 7, 1, 0, 0,
 4, 6, 1, 0,
 3, 3, 2, 5,
 2, 3, 2, 0,
};

static const uint8_t chroma422_dc_coeff_token_len[4*9]={
  1,  0,  0,  0,
  7,  2,  0,  0,
  7,  7,  3,  0,
  9,  7,  7,  5,
  9,  9,  7,  6,
 10, 10,  9,  7,
 11, 11, 10,  7,
 12, 12, 11, 10,
 13, 12, 12, 11,
};

static const uint8_t chroma422_dc_coeff_token_bits[4*9]={
  1,  0,  0,  0,
 15,  1,  0,  0,
 14, 13,  1,  0,
  7, 12, 11,  1,
  6,  5, 10,  1,
  7,  6,  4,  9,
  7,  6,  5,  8,
  7,  6,  5,  4,
  7,  5,  4,  4,
};
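
/* coeff_token tables for luma (and, in 4:4:4, chroma) blocks, one per
 * context: table 0 is used for a predicted non-zero count of 0-1, table 1
 * for 2-3, table 2 for 4-7 and table 3 (fixed 6-bit codes) for 8 and above
 * (see coeff_token_table_index in decode_residual). */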
static const uint8_t coeff_token_len[4][4*17]={
{
     1, 0, 0, 0,
     6, 2, 0, 0,     8, 6, 3, 0,     9, 8, 7, 5,    10, 9, 8, 6,
    11,10, 9, 7,    13,11,10, 8,    13,13,11, 9,    13,13,13,10,
    14,14,13,11,    14,14,14,13,    15,15,14,14,    15,15,15,14,
    16,15,15,15,    16,16,16,15,    16,16,16,16,    16,16,16,16,
},
{
     2, 0, 0, 0,
     6, 2, 0, 0,     6, 5, 3, 0,     7, 6, 6, 4,     8, 6, 6, 4,
     8, 7, 7, 5,     9, 8, 8, 6,    11, 9, 9, 6,    11,11,11, 7,
    12,11,11, 9,    12,12,12,11,    12,12,12,11,    13,13,13,12,
    13,13,13,13,    13,14,13,13,    14,14,14,13,    14,14,14,14,
},
{
     4, 0, 0, 0,
     6, 4, 0, 0,     6, 5, 4, 0,     6, 5, 5, 4,     7, 5, 5, 4,
     7, 5, 5, 4,     7, 6, 6, 4,     7, 6, 6, 4,     8, 7, 7, 5,
     8, 8, 7, 6,     9, 8, 8, 7,     9, 9, 8, 8,     9, 9, 9, 8,
    10, 9, 9, 9,    10,10,10,10,    10,10,10,10,    10,10,10,10,
},
{
     6, 0, 0, 0,
     6, 6, 0, 0,     6, 6, 6, 0,     6, 6, 6, 6,     6, 6, 6, 6,
     6, 6, 6, 6,     6, 6, 6, 6,     6, 6, 6, 6,     6, 6, 6, 6,
     6, 6, 6, 6,     6, 6, 6, 6,     6, 6, 6, 6,     6, 6, 6, 6,
     6, 6, 6, 6,     6, 6, 6, 6,     6, 6, 6, 6,     6, 6, 6, 6,
}
};

static const uint8_t coeff_token_bits[4][4*17]={
{
     1, 0, 0, 0,
     5, 1, 0, 0,     7, 4, 1, 0,     7, 6, 5, 3,     7, 6, 5, 3,
     7, 6, 5, 4,    15, 6, 5, 4,    11,14, 5, 4,     8,10,13, 4,
    15,14, 9, 4,    11,10,13,12,    15,14, 9,12,    11,10,13, 8,
    15, 1, 9,12,    11,14,13, 8,     7,10, 9,12,     4, 6, 5, 8,
},
{
     3, 0, 0, 0,
    11, 2, 0, 0,     7, 7, 3, 0,     7,10, 9, 5,     7, 6, 5, 4,
     4, 6, 5, 6,     7, 6, 5, 8,    15, 6, 5, 4,    11,14,13, 4,
    15,10, 9, 4,    11,14,13,12,     8,10, 9, 8,    15,14,13,12,
    11,10, 9,12,     7,11, 6, 8,     9, 8,10, 1,     7, 6, 5, 4,
},
{
    15, 0, 0, 0,
    15,14, 0, 0,    11,15,13, 0,     8,12,14,12,    15,10,11,11,
    11, 8, 9,10,     9,14,13, 9,     8,10, 9, 8,    15,14,13,13,
    11,14,10,12,    15,10,13,12,    11,14, 9,12,     8,10,13, 8,
    13, 7, 9,12,     9,12,11,10,     5, 8, 7, 6,     1, 4, 3, 2,
},
{
     3, 0, 0, 0,
     0, 1, 0, 0,     4, 5, 6, 0,     8, 9,10,11,    12,13,14,15,
    16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31,
    32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47,
    48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63,
}
};
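
/* total_zeros code lengths and bit patterns, indexed by total_coeff-1
 * (a block with total_coeff coefficients selects row total_coeff-1). */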
static const uint8_t total_zeros_len[16][16]= {
    {1,3,3,4,4,5,5,6,6,7,7,8,8,9,9,9},
    {3,3,3,3,3,4,4,4,4,5,5,6,6,6,6},
    {4,3,3,3,4,4,3,3,4,5,5,6,5,6},
    {5,3,4,4,3,3,3,4,3,4,5,5,5},
    {4,4,4,3,3,3,3,3,4,5,4,5},
    {6,5,3,3,3,3,3,3,4,3,6},
    {6,5,3,3,3,2,3,4,3,6},
    {6,4,5,3,2,2,3,3,6},
    {6,6,4,2,2,3,2,5},
    {5,5,3,2,2,2,4},
    {4,4,3,3,1,3},
    {4,4,2,1,3},
    {3,3,1,2},
    {2,2,1},
    {1,1},
};

static const uint8_t total_zeros_bits[16][16]= {
    {1,3,2,3,2,3,2,3,2,3,2,3,2,3,2,1},
    {7,6,5,4,3,5,4,3,2,3,2,3,2,1,0},
    {5,7,6,5,4,3,4,3,2,3,2,1,1,0},
    {3,7,5,4,6,5,4,3,3,2,2,1,0},
    {5,4,3,7,6,5,4,3,2,1,1,0},
    {1,1,7,6,5,4,3,2,1,1,0},
    {1,1,5,4,3,3,2,1,1,0},
    {1,1,1,3,3,2,2,1,0},
    {1,0,1,3,2,1,1,1},
    {1,0,1,3,2,1,1},
    {0,1,1,2,1,3},
    {0,1,1,1,1},
    {0,1,1,1},
    {0,1,1},
    {0,1},
};

static const uint8_t chroma_dc_total_zeros_len[3][4]= {
    { 1, 2, 3, 3,},
    { 1, 2, 2, 0,},
    { 1, 1, 0, 0,},
};

static const uint8_t chroma_dc_total_zeros_bits[3][4]= {
    { 1, 1, 1, 0,},
    { 1, 1, 0, 0,},
    { 1, 0, 0, 0,},
};

static const uint8_t chroma422_dc_total_zeros_len[7][8]= {
    { 1, 3, 3, 4, 4, 4, 5, 5 },
    { 3, 2, 3, 3, 3, 3, 3 },
    { 3, 3, 2, 2, 3, 3 },
    { 3, 2, 2, 2, 3 },
    { 2, 2, 2, 2 },
    { 2, 2, 1 },
    { 1, 1 },
};

static const uint8_t chroma422_dc_total_zeros_bits[7][8]= {
    { 1, 2, 3, 2, 3, 1, 1, 0 },
    { 0, 1, 1, 4, 5, 6, 7 },
    { 0, 1, 1, 2, 6, 7 },
    { 6, 0, 1, 2, 7 },
    { 0, 1, 2, 3 },
    { 0, 1, 1 },
    { 0, 1 },
};
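
/* run_before code lengths and bit patterns: row i is used while
 * zeros_left == i+1; the last row covers every zeros_left greater than 6. */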
static const uint8_t run_len[7][16]={
    {1,1},
    {1,2,2},
    {2,2,2,2},
    {2,2,2,3,3},
    {2,2,3,3,3,3},
    {2,3,3,3,3,3,3},
    {3,3,3,3,3,3,3,4,5,6,7,8,9,10,11},
};

static const uint8_t run_bits[7][16]={
    {1,0},
    {1,1,0},
    {3,2,1,0},
    {3,2,1,1,0},
    {3,2,3,2,1,0},
    {3,0,1,3,2,5,4},
    {7,6,5,4,3,2,1,1,1,1,1,1,1,1,1},
};
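
/* The VLCs below are built once by ff_h264_decode_init_vlc() into the
 * statically allocated tables (INIT_VLC_USE_NEW_STATIC). */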
static VLC coeff_token_vlc[4];
static VLC_TYPE coeff_token_vlc_tables[520+332+280+256][2];
static const int coeff_token_vlc_tables_size[4]={520,332,280,256};

static VLC chroma_dc_coeff_token_vlc;
static VLC_TYPE chroma_dc_coeff_token_vlc_table[256][2];
static const int chroma_dc_coeff_token_vlc_table_size = 256;

static VLC chroma422_dc_coeff_token_vlc;
static VLC_TYPE chroma422_dc_coeff_token_vlc_table[8192][2];
static const int chroma422_dc_coeff_token_vlc_table_size = 8192;

static VLC total_zeros_vlc[15];
static VLC_TYPE total_zeros_vlc_tables[15][512][2];
static const int total_zeros_vlc_tables_size = 512;

static VLC chroma_dc_total_zeros_vlc[3];
static VLC_TYPE chroma_dc_total_zeros_vlc_tables[3][8][2];
static const int chroma_dc_total_zeros_vlc_tables_size = 8;

static VLC chroma422_dc_total_zeros_vlc[7];
static VLC_TYPE chroma422_dc_total_zeros_vlc_tables[7][32][2];
static const int chroma422_dc_total_zeros_vlc_tables_size = 32;

static VLC run_vlc[6];
static VLC_TYPE run_vlc_tables[6][8][2];
static const int run_vlc_tables_size = 8;

static VLC run7_vlc;
static VLC_TYPE run7_vlc_table[96][2];
static const int run7_vlc_table_size = 96;

#define LEVEL_TAB_BITS 8
static int8_t cavlc_level_tab[7][1<<LEVEL_TAB_BITS][2];

#define CHROMA_DC_COEFF_TOKEN_VLC_BITS 8
#define CHROMA422_DC_COEFF_TOKEN_VLC_BITS 13
#define COEFF_TOKEN_VLC_BITS 8
#define TOTAL_ZEROS_VLC_BITS 9
#define CHROMA_DC_TOTAL_ZEROS_VLC_BITS 3
#define CHROMA422_DC_TOTAL_ZEROS_VLC_BITS 5
#define RUN_VLC_BITS 3
#define RUN7_VLC_BITS 6

/**
 * Get the predicted number of non-zero coefficients.
 * @param n block index
 */
static inline int pred_non_zero_count(const H264Context *h, H264SliceContext *sl, int n)
{
    const int index8= scan8[n];
    const int left = sl->non_zero_count_cache[index8 - 1];
    const int top  = sl->non_zero_count_cache[index8 - 8];
    int i= left + top;

    if(i<64) i= (i+1)>>1;

    ff_tlog(h->avctx, "pred_nnz L%X T%X n%d s%d P%X\n", left, top, n, scan8[n], i&31);

    return i&31;
}
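
/* Build the level decoding lookup table: for each suffix_length and each
 * possible LEVEL_TAB_BITS-bit window, store the decoded level and the number
 * of bits consumed. Entries >= 100 signal that the code did not fit into the
 * window: the value encodes prefix+100 (or LEVEL_TAB_BITS+100 if even the
 * prefix is incomplete), and the caller falls back to bit-by-bit parsing. */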
static av_cold void init_cavlc_level_tab(void){
    int suffix_length;
    unsigned int i;

    for(suffix_length=0; suffix_length<7; suffix_length++){
        for(i=0; i<(1<<LEVEL_TAB_BITS); i++){
            int prefix= LEVEL_TAB_BITS - av_log2(2*i);

            if(prefix + 1 + suffix_length <= LEVEL_TAB_BITS){
                int level_code = (prefix << suffix_length) +
                                 (i >> (av_log2(i) - suffix_length)) - (1 << suffix_length);
                int mask = -(level_code&1);
                level_code = (((2 + level_code) >> 1) ^ mask) - mask;
                cavlc_level_tab[suffix_length][i][0]= level_code;
                cavlc_level_tab[suffix_length][i][1]= prefix + 1 + suffix_length;
            }else if(prefix + 1 <= LEVEL_TAB_BITS){
                cavlc_level_tab[suffix_length][i][0]= prefix+100;
                cavlc_level_tab[suffix_length][i][1]= prefix + 1;
            }else{
                cavlc_level_tab[suffix_length][i][0]= LEVEL_TAB_BITS+100;
                cavlc_level_tab[suffix_length][i][1]= LEVEL_TAB_BITS;
            }
        }
    }
}

av_cold void ff_h264_decode_init_vlc(void){
    static int done = 0;

    if (!done) {
        int i;
        int offset;
        done = 1;

        chroma_dc_coeff_token_vlc.table = chroma_dc_coeff_token_vlc_table;
        chroma_dc_coeff_token_vlc.table_allocated = chroma_dc_coeff_token_vlc_table_size;
        init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5,
                 &chroma_dc_coeff_token_len [0], 1, 1,
                 &chroma_dc_coeff_token_bits[0], 1, 1,
                 INIT_VLC_USE_NEW_STATIC);

        chroma422_dc_coeff_token_vlc.table = chroma422_dc_coeff_token_vlc_table;
        chroma422_dc_coeff_token_vlc.table_allocated = chroma422_dc_coeff_token_vlc_table_size;
        init_vlc(&chroma422_dc_coeff_token_vlc, CHROMA422_DC_COEFF_TOKEN_VLC_BITS, 4*9,
                 &chroma422_dc_coeff_token_len [0], 1, 1,
                 &chroma422_dc_coeff_token_bits[0], 1, 1,
                 INIT_VLC_USE_NEW_STATIC);

        offset = 0;
        for(i=0; i<4; i++){
            coeff_token_vlc[i].table = coeff_token_vlc_tables+offset;
            coeff_token_vlc[i].table_allocated = coeff_token_vlc_tables_size[i];
            init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17,
                     &coeff_token_len [i][0], 1, 1,
                     &coeff_token_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
            offset += coeff_token_vlc_tables_size[i];
        }
        /*
         * This is a one time safety check to make sure that
         * the packed static coeff_token_vlc table sizes
         * were initialized correctly.
         */
        assert(offset == FF_ARRAY_ELEMS(coeff_token_vlc_tables));

        for(i=0; i<3; i++){
            chroma_dc_total_zeros_vlc[i].table = chroma_dc_total_zeros_vlc_tables[i];
            chroma_dc_total_zeros_vlc[i].table_allocated = chroma_dc_total_zeros_vlc_tables_size;
            init_vlc(&chroma_dc_total_zeros_vlc[i],
                     CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4,
                     &chroma_dc_total_zeros_len [i][0], 1, 1,
                     &chroma_dc_total_zeros_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
        }

        for(i=0; i<7; i++){
            chroma422_dc_total_zeros_vlc[i].table = chroma422_dc_total_zeros_vlc_tables[i];
            chroma422_dc_total_zeros_vlc[i].table_allocated = chroma422_dc_total_zeros_vlc_tables_size;
            init_vlc(&chroma422_dc_total_zeros_vlc[i],
                     CHROMA422_DC_TOTAL_ZEROS_VLC_BITS, 8,
                     &chroma422_dc_total_zeros_len [i][0], 1, 1,
                     &chroma422_dc_total_zeros_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
        }

        for(i=0; i<15; i++){
            total_zeros_vlc[i].table = total_zeros_vlc_tables[i];
            total_zeros_vlc[i].table_allocated = total_zeros_vlc_tables_size;
            init_vlc(&total_zeros_vlc[i],
                     TOTAL_ZEROS_VLC_BITS, 16,
                     &total_zeros_len [i][0], 1, 1,
                     &total_zeros_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
        }

        for(i=0; i<6; i++){
            run_vlc[i].table = run_vlc_tables[i];
            run_vlc[i].table_allocated = run_vlc_tables_size;
            init_vlc(&run_vlc[i],
                     RUN_VLC_BITS, 7,
                     &run_len [i][0], 1, 1,
                     &run_bits[i][0], 1, 1,
                     INIT_VLC_USE_NEW_STATIC);
        }

        run7_vlc.table = run7_vlc_table;
        run7_vlc.table_allocated = run7_vlc_table_size;
        init_vlc(&run7_vlc, RUN7_VLC_BITS, 16,
                 &run_len [6][0], 1, 1,
                 &run_bits[6][0], 1, 1,
                 INIT_VLC_USE_NEW_STATIC);

        init_cavlc_level_tab();
    }
}
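
/* Read the unary level_prefix code: count the leading zero bits and consume
 * them together with the terminating one bit. */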
static inline int get_level_prefix(GetBitContext *gb){
    unsigned int buf;
    int log;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf=GET_CACHE(re, gb);

    log= 32 - av_log2(buf);

    LAST_SKIP_BITS(re, gb, log);
    CLOSE_READER(re, gb);

    return log-1;
}

/**
 * Decode a residual block.
 * @param n block index
 * @param scantable scantable
 * @param max_coeff number of coefficients in the block
 * @return <0 if an error occurred
 */
static int decode_residual(const H264Context *h, H264SliceContext *sl,
                           GetBitContext *gb, int16_t *block, int n,
                           const uint8_t *scantable, const uint32_t *qmul,
                           int max_coeff)
{
    static const int coeff_token_table_index[17]= {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
    int level[16];
    int zeros_left, coeff_token, total_coeff, i, trailing_ones, run_before;

    //FIXME put trailing_ones into the context

    if(max_coeff <= 8){
        if (max_coeff == 4)
            coeff_token = get_vlc2(gb, chroma_dc_coeff_token_vlc.table, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 1);
        else
            coeff_token = get_vlc2(gb, chroma422_dc_coeff_token_vlc.table, CHROMA422_DC_COEFF_TOKEN_VLC_BITS, 1);
        total_coeff= coeff_token>>2;
    }else{
        if(n >= LUMA_DC_BLOCK_INDEX){
            total_coeff= pred_non_zero_count(h, sl, (n - LUMA_DC_BLOCK_INDEX)*16);
            coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
            total_coeff= coeff_token>>2;
        }else{
            total_coeff= pred_non_zero_count(h, sl, n);
            coeff_token= get_vlc2(gb, coeff_token_vlc[ coeff_token_table_index[total_coeff] ].table, COEFF_TOKEN_VLC_BITS, 2);
            total_coeff= coeff_token>>2;
        }
    }
    sl->non_zero_count_cache[scan8[n]] = total_coeff;

    //FIXME set last_non_zero?
    if(total_coeff==0)
        return 0;
    if(total_coeff > (unsigned)max_coeff) {
        av_log(h->avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", sl->mb_x, sl->mb_y, total_coeff);
        return -1;
    }

    trailing_ones= coeff_token&3;
    ff_tlog(h->avctx, "trailing:%d, total:%d\n", trailing_ones, total_coeff);
    assert(total_coeff<=16);
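
    /* The trailing_ones coefficients all have magnitude 1; only a sign bit
     * is coded for each (1 = negative). Peek 3 bits, derive the signs
     * branchlessly, then consume exactly trailing_ones bits. */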
    i = show_bits(gb, 3);
    skip_bits(gb, trailing_ones);
    level[0] = 1-((i&4)>>1);
    level[1] = 1-((i&2)   );
    level[2] = 1-((i&1)<<1);

    if(trailing_ones<total_coeff) {
        int mask, prefix;
        int suffix_length = total_coeff > 10 & trailing_ones < 3;
        int bitsi= show_bits(gb, LEVEL_TAB_BITS);
        int level_code= cavlc_level_tab[suffix_length][bitsi][0];

        skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
        if(level_code >= 100){
            prefix= level_code - 100;
            if(prefix == LEVEL_TAB_BITS)
                prefix += get_level_prefix(gb);

            //first coefficient has suffix_length equal to 0 or 1
            if(prefix<14){ //FIXME try to build a large unified VLC table for all this
                if(suffix_length)
                    level_code= (prefix<<1) + get_bits1(gb); //part
                else
                    level_code= prefix; //part
            }else if(prefix==14){
                if(suffix_length)
                    level_code= (prefix<<1) + get_bits1(gb); //part
                else
                    level_code= prefix + get_bits(gb, 4); //part
            }else{
                level_code= 30 + get_bits(gb, prefix-3); //part
                if(prefix>=16){
                    if(prefix > 25+3){
                        av_log(h->avctx, AV_LOG_ERROR, "Invalid level prefix\n");
                        return -1;
                    }
                    level_code += (1<<(prefix-3))-4096;
                }
            }

            if(trailing_ones < 3) level_code += 2;

            suffix_length = 2;
            mask= -(level_code&1);
            level[trailing_ones]= (((2+level_code)>>1) ^ mask) - mask;
        }else{
            level_code += ((level_code>>31)|1) & -(trailing_ones < 3);

            suffix_length = 1 + (level_code + 3U > 6U);
            level[trailing_ones]= level_code;
        }

        //remaining coefficients have suffix_length > 0
        for(i=trailing_ones+1;i<total_coeff;i++) {
            static const unsigned int suffix_limit[7] = {0,3,6,12,24,48,INT_MAX };
            int bitsi= show_bits(gb, LEVEL_TAB_BITS);
            level_code= cavlc_level_tab[suffix_length][bitsi][0];

            skip_bits(gb, cavlc_level_tab[suffix_length][bitsi][1]);
            if(level_code >= 100){
                prefix= level_code - 100;
                if(prefix == LEVEL_TAB_BITS){
                    prefix += get_level_prefix(gb);
                }
                if(prefix<15){
                    level_code = (prefix<<suffix_length) + get_bits(gb, suffix_length);
                }else{
                    level_code = (15<<suffix_length) + get_bits(gb, prefix-3);
                    if(prefix>=16)
                        level_code += (1<<(prefix-3))-4096;
                }
                mask= -(level_code&1);
                level_code= (((2+level_code)>>1) ^ mask) - mask;
            }
            level[i]= level_code;
            suffix_length+= suffix_limit[suffix_length] + level_code > 2U*suffix_limit[suffix_length];
        }
    }

    if(total_coeff == max_coeff)
        zeros_left=0;
    else{
        if (max_coeff <= 8) {
            if (max_coeff == 4)
                zeros_left = get_vlc2(gb, chroma_dc_total_zeros_vlc[total_coeff - 1].table,
                                      CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 1);
            else
                zeros_left = get_vlc2(gb, chroma422_dc_total_zeros_vlc[total_coeff - 1].table,
                                      CHROMA422_DC_TOTAL_ZEROS_VLC_BITS, 1);
        } else {
            zeros_left= get_vlc2(gb, total_zeros_vlc[total_coeff - 1].table, TOTAL_ZEROS_VLC_BITS, 1);
        }
    }
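
/* Write the coefficients into the block in reverse scan order, stepping back
 * over run_before zeros between them. DC blocks (n >= LUMA_DC_BLOCK_INDEX)
 * store the raw levels; the other blocks are dequantized with qmul, rounding
 * with +32 and a right shift by 6. */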
#define STORE_BLOCK(type) \
    scantable += zeros_left + total_coeff - 1; \
    if(n >= LUMA_DC_BLOCK_INDEX){ \
        ((type*)block)[*scantable] = level[0]; \
        for(i=1;i<total_coeff && zeros_left > 0;i++) { \
            if(zeros_left < 7) \
                run_before= get_vlc2(gb, run_vlc[zeros_left - 1].table, RUN_VLC_BITS, 1); \
            else {\
                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2); \
                run_before = FFMIN(zeros_left, run_before);\
            }\
            zeros_left -= run_before; \
            scantable -= 1 + run_before; \
            ((type*)block)[*scantable]= level[i]; \
        } \
        for(;i<total_coeff;i++) { \
            scantable--; \
            ((type*)block)[*scantable]= level[i]; \
        } \
    }else{ \
        ((type*)block)[*scantable] = ((int)(level[0] * qmul[*scantable] + 32))>>6; \
        for(i=1;i<total_coeff && zeros_left > 0;i++) { \
            if(zeros_left < 7) \
                run_before= get_vlc2(gb, run_vlc[zeros_left - 1].table, RUN_VLC_BITS, 1); \
            else {\
                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2); \
                run_before = FFMIN(zeros_left, run_before);\
            }\
            zeros_left -= run_before; \
            scantable -= 1 + run_before; \
            ((type*)block)[*scantable]= ((int)(level[i] * qmul[*scantable] + 32))>>6; \
        } \
        for(;i<total_coeff;i++) { \
            scantable--; \
            ((type*)block)[*scantable]= ((int)(level[i] * qmul[*scantable] + 32))>>6; \
        } \
    }

    if (zeros_left < 0) {
        av_log(h->avctx, AV_LOG_ERROR,
               "negative number of zero coeffs at %d %d\n", sl->mb_x, sl->mb_y);
        return AVERROR_INVALIDDATA;
    }

    if (h->pixel_shift) {
        STORE_BLOCK(int32_t)
    } else {
        STORE_BLOCK(int16_t)
    }

    return 0;
}
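
/**
 * Decode the residuals of one luma plane (plane p of a 4:4:4 macroblock,
 * or the single luma plane otherwise).
 * @return the per-8x8 CBP of blocks that actually contain coefficients
 *         (used for deblocking nnz bookkeeping), or <0 on error
 */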
static av_always_inline
int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
                         GetBitContext *gb, const uint8_t *scan,
                         const uint8_t *scan8x8, int pixel_shift,
                         int mb_type, int cbp, int p)
{
    int i4x4, i8x8;
    int qscale = p == 0 ? sl->qscale : sl->chroma_qp[p - 1];
    if(IS_INTRA16x16(mb_type)){
        AV_ZERO128(sl->mb_luma_dc[p]+0);
        AV_ZERO128(sl->mb_luma_dc[p]+8);
        AV_ZERO128(sl->mb_luma_dc[p]+16);
        AV_ZERO128(sl->mb_luma_dc[p]+24);
        if (decode_residual(h, sl, gb, sl->mb_luma_dc[p], LUMA_DC_BLOCK_INDEX + p, scan, NULL, 16) < 0) {
            return -1; //FIXME continue if partitioned and other return -1 too
        }

        assert((cbp&15) == 0 || (cbp&15) == 15);

        if(cbp&15){
            for(i8x8=0; i8x8<4; i8x8++){
                for(i4x4=0; i4x4<4; i4x4++){
                    const int index= i4x4 + 4*i8x8 + p*16;
                    if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift),
                                        index, scan + 1, h->ps.pps->dequant4_coeff[p][qscale], 15) < 0 ){
                        return -1;
                    }
                }
            }
            return 0xf;
        }else{
            fill_rectangle(&sl->non_zero_count_cache[scan8[p*16]], 4, 4, 8, 0, 1);
            return 0;
        }
    }else{
        int cqm = (IS_INTRA( mb_type ) ? 0:3)+p;
        /* For CAVLC 4:4:4, we need to keep track of the luma 8x8 CBP for deblocking nnz purposes. */
        int new_cbp = 0;
        for(i8x8=0; i8x8<4; i8x8++){
            if(cbp & (1<<i8x8)){
                if(IS_8x8DCT(mb_type)){
                    int16_t *buf = &sl->mb[64*i8x8+256*p << pixel_shift];
                    uint8_t *nnz;
                    for(i4x4=0; i4x4<4; i4x4++){
                        const int index= i4x4 + 4*i8x8 + p*16;
                        if( decode_residual(h, sl, gb, buf, index, scan8x8+16*i4x4,
                                            h->ps.pps->dequant8_coeff[cqm][qscale], 16) < 0 )
                            return -1;
                    }
                    nnz = &sl->non_zero_count_cache[scan8[4 * i8x8 + p * 16]];
                    nnz[0] += nnz[1] + nnz[8] + nnz[9];
                    new_cbp |= !!nnz[0] << i8x8;
                }else{
                    for(i4x4=0; i4x4<4; i4x4++){
                        const int index= i4x4 + 4*i8x8 + p*16;
                        if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index,
                                            scan, h->ps.pps->dequant4_coeff[cqm][qscale], 16) < 0 ){
                            return -1;
                        }
                        new_cbp |= sl->non_zero_count_cache[scan8[index]] << i8x8;
                    }
                }
            }else{
                uint8_t * const nnz = &sl->non_zero_count_cache[scan8[4 * i8x8 + p * 16]];
                nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0;
            }
        }
        return new_cbp;
    }
}
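
/**
 * Decode one macroblock from a CAVLC-coded slice: mb_type, prediction
 * information, coded_block_pattern and the residual coefficients.
 * @return 0 on success, a negative value on error
 */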
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
{
    int mb_xy;
    int partition_count;
    unsigned int mb_type, cbp;
    int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
    int decode_chroma = h->ps.sps->chroma_format_idc == 1 || h->ps.sps->chroma_format_idc == 2;
    const int pixel_shift = h->pixel_shift;

    mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;

    cbp = 0; /* avoid warning. FIXME: find a solution without slowing
                down the code */
    if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
        if (sl->mb_skip_run == -1)
            sl->mb_skip_run = get_ue_golomb(&sl->gb);

        if (sl->mb_skip_run--) {
            if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 0) {
                if (sl->mb_skip_run == 0)
                    sl->mb_mbaff = sl->mb_field_decoding_flag = get_bits1(&sl->gb);
            }
            decode_mb_skip(h, sl);
            return 0;
        }
    }
    if (FRAME_MBAFF(h)) {
        if ((sl->mb_y & 1) == 0)
            sl->mb_mbaff = sl->mb_field_decoding_flag = get_bits1(&sl->gb);
    }

    sl->prev_mb_skipped = 0;

    mb_type= get_ue_golomb(&sl->gb);
    if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
        if(mb_type < 23){
            partition_count = ff_h264_b_mb_type_info[mb_type].partition_count;
            mb_type         = ff_h264_b_mb_type_info[mb_type].type;
        }else{
            mb_type -= 23;
            goto decode_intra_mb;
        }
    } else if (sl->slice_type_nos == AV_PICTURE_TYPE_P) {
        if(mb_type < 5){
            partition_count = ff_h264_p_mb_type_info[mb_type].partition_count;
            mb_type         = ff_h264_p_mb_type_info[mb_type].type;
        }else{
            mb_type -= 5;
            goto decode_intra_mb;
        }
    }else{
        assert(sl->slice_type_nos == AV_PICTURE_TYPE_I);
        if (sl->slice_type == AV_PICTURE_TYPE_SI && mb_type)
            mb_type--;
decode_intra_mb:
        if(mb_type > 25){
            av_log(h->avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(sl->slice_type), sl->mb_x, sl->mb_y);
            return -1;
        }
        partition_count=0;
        cbp                      = ff_h264_i_mb_type_info[mb_type].cbp;
        sl->intra16x16_pred_mode = ff_h264_i_mb_type_info[mb_type].pred_mode;
        mb_type                  = ff_h264_i_mb_type_info[mb_type].type;
    }

    if (MB_FIELD(sl))
        mb_type |= MB_TYPE_INTERLACED;

    h->slice_table[mb_xy] = sl->slice_num;

    if(IS_INTRA_PCM(mb_type)){
        const int mb_size = ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] *
                            h->ps.sps->bit_depth_luma;

        // We assume these blocks are very rare so we do not optimize it.
        sl->intra_pcm_ptr = align_get_bits(&sl->gb);
        if (get_bits_left(&sl->gb) < mb_size) {
            av_log(h->avctx, AV_LOG_ERROR, "Not enough data for an intra PCM block.\n");
            return AVERROR_INVALIDDATA;
        }
        skip_bits_long(&sl->gb, mb_size);

        // In deblocking, the quantizer is 0
        h->cur_pic.qscale_table[mb_xy] = 0;
        // All coeffs are present
        memset(h->non_zero_count[mb_xy], 16, 48);

        h->cur_pic.mb_type[mb_xy] = mb_type;
        return 0;
    }

    fill_decode_neighbors(h, sl, mb_type);
    fill_decode_caches(h, sl, mb_type);

    //mb_pred
    if(IS_INTRA(mb_type)){
        int pred_mode;
        // init_top_left_availability(h);
        if(IS_INTRA4x4(mb_type)){
            int i;
            int di = 1;
            if(dct8x8_allowed && get_bits1(&sl->gb)){
                mb_type |= MB_TYPE_8x8DCT;
                di = 4;
            }

            // fill_intra4x4_pred_table(h);
            for(i=0; i<16; i+=di){
                int mode = pred_intra_mode(h, sl, i);

                if(!get_bits1(&sl->gb)){
                    const int rem_mode= get_bits(&sl->gb, 3);
                    mode = rem_mode + (rem_mode >= mode);
                }

                if(di==4)
                    fill_rectangle(&sl->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1);
                else
                    sl->intra4x4_pred_mode_cache[scan8[i]] = mode;
            }
            write_back_intra_pred_mode(h, sl);
            if (ff_h264_check_intra4x4_pred_mode(sl->intra4x4_pred_mode_cache, h->avctx,
                                                 sl->top_samples_available, sl->left_samples_available) < 0)
                return -1;
        }else{
            sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h->avctx, sl->top_samples_available,
                                                                     sl->left_samples_available, sl->intra16x16_pred_mode, 0);
            if (sl->intra16x16_pred_mode < 0)
                return -1;
        }
        if(decode_chroma){
            pred_mode= ff_h264_check_intra_pred_mode(h->avctx, sl->top_samples_available,
                                                     sl->left_samples_available, get_ue_golomb_31(&sl->gb), 1);
            if(pred_mode < 0)
                return -1;
            sl->chroma_pred_mode = pred_mode;
        } else {
            sl->chroma_pred_mode = DC_128_PRED8x8;
        }
    }else if(partition_count==4){
        int i, j, sub_partition_count[4], list, ref[2][4];

        if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
            for(i=0; i<4; i++){
                sl->sub_mb_type[i]= get_ue_golomb_31(&sl->gb);
                if(sl->sub_mb_type[i] >=13){
                    av_log(h->avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], sl->mb_x, sl->mb_y);
                    return -1;
                }
                sub_partition_count[i] = ff_h264_b_sub_mb_type_info[sl->sub_mb_type[i]].partition_count;
                sl->sub_mb_type[i]     = ff_h264_b_sub_mb_type_info[sl->sub_mb_type[i]].type;
            }
            if( IS_DIRECT(sl->sub_mb_type[0]|sl->sub_mb_type[1]|sl->sub_mb_type[2]|sl->sub_mb_type[3])) {
                ff_h264_pred_direct_motion(h, sl, &mb_type);
                sl->ref_cache[0][scan8[4]] =
                sl->ref_cache[1][scan8[4]] =
                sl->ref_cache[0][scan8[12]] =
                sl->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
            }
        }else{
            assert(sl->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ?
            for(i=0; i<4; i++){
                sl->sub_mb_type[i]= get_ue_golomb_31(&sl->gb);
                if(sl->sub_mb_type[i] >=4){
                    av_log(h->avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], sl->mb_x, sl->mb_y);
                    return -1;
                }
                sub_partition_count[i] = ff_h264_p_sub_mb_type_info[sl->sub_mb_type[i]].partition_count;
                sl->sub_mb_type[i]     = ff_h264_p_sub_mb_type_info[sl->sub_mb_type[i]].type;
            }
        }

        for (list = 0; list < sl->list_count; list++) {
            int ref_count = IS_REF0(mb_type) ? 1 : sl->ref_count[list] << MB_MBAFF(sl);
            for(i=0; i<4; i++){
                if(IS_DIRECT(sl->sub_mb_type[i])) continue;
                if(IS_DIR(sl->sub_mb_type[i], 0, list)){
                    unsigned int tmp;
                    if(ref_count == 1){
                        tmp= 0;
                    }else if(ref_count == 2){
                        tmp= get_bits1(&sl->gb)^1;
                    }else{
                        tmp= get_ue_golomb_31(&sl->gb);
                        if(tmp>=ref_count){
                            av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp);
                            return -1;
                        }
                    }
                    ref[list][i]= tmp;
                }else{
                    //FIXME
                    ref[list][i] = -1;
                }
            }
        }

        if(dct8x8_allowed)
            dct8x8_allowed = get_dct8x8_allowed(h, sl);

        for (list = 0; list < sl->list_count; list++) {
            for(i=0; i<4; i++){
                if(IS_DIRECT(sl->sub_mb_type[i])) {
                    sl->ref_cache[list][ scan8[4*i] ] = sl->ref_cache[list][ scan8[4*i]+1 ];
                    continue;
                }
                sl->ref_cache[list][ scan8[4*i]   ]=sl->ref_cache[list][ scan8[4*i]+1 ]=
                sl->ref_cache[list][ scan8[4*i]+8 ]=sl->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];

                if(IS_DIR(sl->sub_mb_type[i], 0, list)){
                    const int sub_mb_type= sl->sub_mb_type[i];
                    const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
                    for(j=0; j<sub_partition_count[i]; j++){
                        int mx, my;
                        const int index= 4*i + block_width*j;
                        int16_t (* mv_cache)[2]= &sl->mv_cache[list][ scan8[index] ];
                        pred_motion(h, sl, index, block_width, list, sl->ref_cache[list][ scan8[index] ], &mx, &my);
                        mx += get_se_golomb(&sl->gb);
                        my += get_se_golomb(&sl->gb);
                        ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);

                        if(IS_SUB_8X8(sub_mb_type)){
                            mv_cache[ 1 ][0]=
                            mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
                            mv_cache[ 1 ][1]=
                            mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
                        }else if(IS_SUB_8X4(sub_mb_type)){
                            mv_cache[ 1 ][0]= mx;
                            mv_cache[ 1 ][1]= my;
                        }else if(IS_SUB_4X8(sub_mb_type)){
                            mv_cache[ 8 ][0]= mx;
                            mv_cache[ 8 ][1]= my;
                        }
                        mv_cache[ 0 ][0]= mx;
                        mv_cache[ 0 ][1]= my;
                    }
                }else{
                    uint32_t *p= (uint32_t *)&sl->mv_cache[list][ scan8[4*i] ][0];
                    p[0] = p[1]=
                    p[8] = p[9]= 0;
                }
            }
        }
    }else if(IS_DIRECT(mb_type)){
        ff_h264_pred_direct_motion(h, sl, &mb_type);
        dct8x8_allowed &= h->ps.sps->direct_8x8_inference_flag;
    }else{
        int list, mx, my, i;
        //FIXME we should set ref_idx_l? to 0 if we use that later ...
        if(IS_16X16(mb_type)){
            for (list = 0; list < sl->list_count; list++) {
                unsigned int val;
                if(IS_DIR(mb_type, 0, list)){
                    int rc = sl->ref_count[list] << MB_MBAFF(sl);
                    if (rc == 1) {
                        val= 0;
                    } else if (rc == 2) {
                        val= get_bits1(&sl->gb)^1;
                    }else{
                        val= get_ue_golomb_31(&sl->gb);
                        if (val >= rc) {
                            av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
                            return -1;
                        }
                    }
                    fill_rectangle(&sl->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1);
                }
            }
            for (list = 0; list < sl->list_count; list++) {
                if(IS_DIR(mb_type, 0, list)){
                    pred_motion(h, sl, 0, 4, list, sl->ref_cache[list][ scan8[0] ], &mx, &my);
                    mx += get_se_golomb(&sl->gb);
                    my += get_se_golomb(&sl->gb);
                    ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);

                    fill_rectangle(sl->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4);
                }
            }
        }
        else if(IS_16X8(mb_type)){
            for (list = 0; list < sl->list_count; list++) {
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){
                        int rc = sl->ref_count[list] << MB_MBAFF(sl);
                        if (rc == 1) {
                            val= 0;
                        } else if (rc == 2) {
                            val= get_bits1(&sl->gb)^1;
                        }else{
                            val= get_ue_golomb_31(&sl->gb);
                            if (val >= rc) {
                                av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
                                return -1;
                            }
                        }
                    }else
                        val= LIST_NOT_USED&0xFF;
                    fill_rectangle(&sl->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1);
                }
            }
            for (list = 0; list < sl->list_count; list++) {
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){
                        pred_16x8_motion(h, sl, 8*i, list, sl->ref_cache[list][scan8[0] + 16*i], &mx, &my);
                        mx += get_se_golomb(&sl->gb);
                        my += get_se_golomb(&sl->gb);
                        ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);

                        val= pack16to32(mx,my);
                    }else
                        val=0;
                    fill_rectangle(sl->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4);
                }
            }
        }else{
            assert(IS_8X16(mb_type));
            for (list = 0; list < sl->list_count; list++) {
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){ //FIXME optimize
                        int rc = sl->ref_count[list] << MB_MBAFF(sl);
                        if (rc == 1) {
                            val= 0;
                        } else if (rc == 2) {
                            val= get_bits1(&sl->gb)^1;
                        }else{
                            val= get_ue_golomb_31(&sl->gb);
                            if (val >= rc) {
                                av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
                                return -1;
                            }
                        }
                    }else
                        val= LIST_NOT_USED&0xFF;
                    fill_rectangle(&sl->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1);
                }
            }
            for (list = 0; list < sl->list_count; list++) {
                for(i=0; i<2; i++){
                    unsigned int val;
                    if(IS_DIR(mb_type, i, list)){
                        pred_8x16_motion(h, sl, i*4, list, sl->ref_cache[list][ scan8[0] + 2*i ], &mx, &my);
                        mx += get_se_golomb(&sl->gb);
                        my += get_se_golomb(&sl->gb);
                        ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);

                        val= pack16to32(mx,my);
                    }else
                        val=0;
                    fill_rectangle(sl->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4);
                }
            }
        }
    }

    if(IS_INTER(mb_type))
        write_back_motion(h, sl, mb_type);
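
    /* coded_block_pattern is only transmitted for non-intra16x16 macroblocks;
     * intra16x16 types carry their CBP inside mb_type (set above from
     * ff_h264_i_mb_type_info). */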
    if(!IS_INTRA16x16(mb_type)){
        cbp= get_ue_golomb(&sl->gb);

        if(decode_chroma){
            if(cbp > 47){
                av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, sl->mb_x, sl->mb_y);
                return -1;
            }
            if (IS_INTRA4x4(mb_type))
                cbp = ff_h264_golomb_to_intra4x4_cbp[cbp];
            else
                cbp = ff_h264_golomb_to_inter_cbp[cbp];
        }else{
            if(cbp > 15){
                av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, sl->mb_x, sl->mb_y);
                return -1;
            }
            if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp];
            else                     cbp= golomb_to_inter_cbp_gray[cbp];
        }
    }

    if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
        mb_type |= MB_TYPE_8x8DCT*get_bits1(&sl->gb);
    }
    sl->cbp=
    h->cbp_table[mb_xy]= cbp;
    h->cur_pic.mb_type[mb_xy] = mb_type;

    if(cbp || IS_INTRA16x16(mb_type)){
        int i4x4, i8x8, chroma_idx;
        int dquant;
        int ret;
        GetBitContext *gb = &sl->gb;
        const uint8_t *scan, *scan8x8;
        const int max_qp = 51 + 6 * (h->ps.sps->bit_depth_luma - 8);

        if(IS_INTERLACED(mb_type)){
            scan8x8 = sl->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
            scan    = sl->qscale ? h->field_scan : h->field_scan_q0;
        }else{
            scan8x8 = sl->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0;
            scan    = sl->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
        }

        dquant= get_se_golomb(&sl->gb);

        sl->qscale += dquant;

        if (((unsigned)sl->qscale) > max_qp){
            if (sl->qscale < 0) sl->qscale += max_qp + 1;
            else                sl->qscale -= max_qp+1;
            if (((unsigned)sl->qscale) > max_qp){
                av_log(h->avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, sl->mb_x, sl->mb_y);
                return -1;
            }
        }

        sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
        sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);

        if ((ret = decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 0)) < 0 ) {
            return -1;
        }
        h->cbp_table[mb_xy] |= ret << 12;
        if (CHROMA444(h)) {
            if (decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 1) < 0 ) {
                return -1;
            }
            if (decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 2) < 0 ) {
                return -1;
            }
        } else if (CHROMA422(h)) {
            if(cbp&0x30){
                for(chroma_idx=0; chroma_idx<2; chroma_idx++)
                    if (decode_residual(h, sl, gb, sl->mb + ((256 + 16*16*chroma_idx) << pixel_shift),
                                        CHROMA_DC_BLOCK_INDEX + chroma_idx, ff_h264_chroma422_dc_scan,
                                        NULL, 8) < 0) {
                        return -1;
                    }
            }

            if(cbp&0x20){
                for(chroma_idx=0; chroma_idx<2; chroma_idx++){
                    const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
                    int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift);
                    for (i8x8 = 0; i8x8 < 2; i8x8++) {
                        for (i4x4 = 0; i4x4 < 4; i4x4++) {
                            const int index = 16 + 16*chroma_idx + 8*i8x8 + i4x4;
                            if (decode_residual(h, sl, gb, mb, index, scan + 1, qmul, 15) < 0)
                                return -1;
                            mb += 16 << pixel_shift;
                        }
                    }
                }
            }else{
                fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
                fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
            }
        } else /* yuv420 */ {
            if(cbp&0x30){
                for(chroma_idx=0; chroma_idx<2; chroma_idx++)
                    if (decode_residual(h, sl, gb, sl->mb + ((256 + 16 * 16 * chroma_idx) << pixel_shift),
                                        CHROMA_DC_BLOCK_INDEX + chroma_idx, ff_h264_chroma_dc_scan, NULL, 4) < 0) {
                        return -1;
                    }
            }

            if(cbp&0x20){
                for(chroma_idx=0; chroma_idx<2; chroma_idx++){
                    const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
                    for(i4x4=0; i4x4<4; i4x4++){
                        const int index= 16 + 16*chroma_idx + i4x4;
                        if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index, scan + 1, qmul, 15) < 0){
                            return -1;
                        }
                    }
                }
            }else{
                fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
                fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
            }
        }
    }else{
        fill_rectangle(&sl->non_zero_count_cache[scan8[ 0]], 4, 4, 8, 0, 1);
        fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
        fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
    }
    h->cur_pic.qscale_table[mb_xy] = sl->qscale;
    write_back_non_zero_count(h, sl);

    return 0;
}