/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (C) 2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_SNOW_H
#define AVCODEC_SNOW_H

#include "libavutil/motion_vector.h"

#include "hpeldsp.h"
#include "me_cmp.h"
#include "qpeldsp.h"
#include "snow_dwt.h"

#include "rangecoder.h"
#include "mathops.h"

#define FF_MPV_OFFSET(x) (offsetof(MpegEncContext, x) + offsetof(SnowContext, m))
#include "mpegvideo.h"
#include "h264qpel.h"

#define MID_STATE 128

#define MAX_PLANES 4
#define QSHIFT 5
#define QROOT (1<<QSHIFT)
#define LOSSLESS_QLOG -128
#define FRAC_BITS 4
#define MAX_REF_FRAMES 8

#define LOG2_OBMC_MAX 8
#define OBMC_MAX (1<<(LOG2_OBMC_MAX))
typedef struct BlockNode{
    int16_t mx;
    int16_t my;
    uint8_t ref;
    uint8_t color[3];
    uint8_t type;
//#define TYPE_SPLIT    1
#define BLOCK_INTRA   1
#define BLOCK_OPT     2
//#define TYPE_NOCOLOR  4
    uint8_t level; //FIXME merge into type?
}BlockNode;

static const BlockNode null_block= { //FIXME add border maybe
    .color= {128,128,128},
    .mx= 0,
    .my= 0,
    .ref= 0,
    .type= 0,
    .level= 0,
};

#define LOG2_MB_SIZE 4
#define MB_SIZE (1<<LOG2_MB_SIZE)
#define ENCODER_EXTRA_BITS 4
#define HTAPS_MAX 8

typedef struct x_and_coeff{
    int16_t x;
    uint16_t coeff;
} x_and_coeff;

typedef struct SubBand{
    int level;
    int stride;
    int width;
    int height;
    int qlog;        ///< log(qscale)/log[2^(1/6)]
    DWTELEM *buf;
    IDWTELEM *ibuf;
    int buf_x_offset;
    int buf_y_offset;
    int stride_line; ///< Stride measured in lines, not pixels.
    x_and_coeff * x_coeff;
    struct SubBand *parent;
    uint8_t state[/*7*2*/ 7 + 512][32];
}SubBand;

typedef struct Plane{
    int width;
    int height;
    SubBand band[MAX_DECOMPOSITIONS][4];

    int htaps;
    int8_t hcoeff[HTAPS_MAX/2];
    int diag_mc;
    int fast_mc;

    int last_htaps;
    int8_t last_hcoeff[HTAPS_MAX/2];
    int last_diag_mc;
}Plane;
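
/* Informal note on Plane above: band[level][orientation] holds one SubBand
 * per decomposition level, and the fixed inner dimension of 4 presumably
 * covers the orientations of a separable 2D wavelet split (the coarse LL
 * band plus the HL/LH/HH detail bands). */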
typedef struct SnowContext{
    AVClass *class;
    AVCodecContext *avctx;
    RangeCoder c;
    MECmpContext mecc;
    HpelDSPContext hdsp;
    QpelDSPContext qdsp;
    VideoDSPContext vdsp;
    H264QpelContext h264qpel;
    MpegvideoEncDSPContext mpvencdsp;
    SnowDWTContext dwt;
    const AVFrame *new_picture;
    AVFrame *input_picture;              ///< new_picture with the internal linesizes
    AVFrame *current_picture;
    AVFrame *last_picture[MAX_REF_FRAMES];
    uint8_t *halfpel_plane[MAX_REF_FRAMES][4][4];
    AVFrame *mconly_picture;
//     uint8_t q_context[16];
    uint8_t header_state[32];
    uint8_t block_state[128 + 32*128];
    int keyframe;
    int always_reset;
    int version;
    int spatial_decomposition_type;
    int last_spatial_decomposition_type;
    int temporal_decomposition_type;
    int spatial_decomposition_count;
    int last_spatial_decomposition_count;
    int temporal_decomposition_count;
    int max_ref_frames;
    int ref_frames;
    int16_t (*ref_mvs[MAX_REF_FRAMES])[2];
    uint32_t *ref_scores[MAX_REF_FRAMES];
    DWTELEM *spatial_dwt_buffer;
    DWTELEM *temp_dwt_buffer;
    IDWTELEM *spatial_idwt_buffer;
    IDWTELEM *temp_idwt_buffer;
    int *run_buffer;
    int colorspace_type;
    int chroma_h_shift;
    int chroma_v_shift;
    int spatial_scalability;
    int qlog;
    int last_qlog;
    int lambda;
    int lambda2;
    int pass1_rc;
    int mv_scale;
    int last_mv_scale;
    int qbias;
    int last_qbias;
#define QBIAS_SHIFT 3
    int b_width;
    int b_height;
    int block_max_depth;
    int last_block_max_depth;
    int nb_planes;
    Plane plane[MAX_PLANES];
    BlockNode *block;
#define ME_CACHE_SIZE 1024
    unsigned me_cache[ME_CACHE_SIZE];
    unsigned me_cache_generation;
    slice_buffer sb;
    int memc_only;
    int no_bitstream;
    int intra_penalty;
    int motion_est;

    MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)

    uint8_t *scratchbuf;
    uint8_t *emu_edge_buffer;

    AVMotionVector *avmv;
    int avmv_index;
}SnowContext;

/* Tables */
extern const uint8_t * const ff_obmc_tab[4];
extern uint8_t ff_qexp[QROOT];
extern int ff_scale_mv_ref[MAX_REF_FRAMES][MAX_REF_FRAMES];
/* C bits used by mmx/sse2/altivec */

static av_always_inline void snow_interleave_line_header(int * i, int width, IDWTELEM * low, IDWTELEM * high){
    (*i) = (width) - 2;

    if (width & 1){
        low[(*i)+1] = low[((*i)+1)>>1];
        (*i)--;
    }
}

static av_always_inline void snow_interleave_line_footer(int * i, IDWTELEM * low, IDWTELEM * high){
    for (; (*i)>=0; (*i)-=2){
        low[(*i)+1] = high[(*i)>>1];
        low[*i]     = low[(*i)>>1];
    }
}

static av_always_inline void snow_horizontal_compose_lift_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w, int lift_high, int mul, int add, int shift){
    for(; i<w; i++){
        dst[i] = src[i] - ((mul * (ref[i] + ref[i + 1]) + add) >> shift);
    }

    if((width^lift_high)&1){
        dst[w] = src[w] - ((mul * 2 * ref[w] + add) >> shift);
    }
}

static av_always_inline void snow_horizontal_compose_liftS_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w){
    for(; i<w; i++){
        dst[i] = src[i] + ((ref[i] + ref[(i+1)] + W_BO + 4 * src[i]) >> W_BS);
    }

    if(width&1){
        dst[w] = src[w] + ((2 * ref[w] + W_BO + 4 * src[w]) >> W_BS);
    }
}
/* common code */

int ff_snow_common_init(AVCodecContext *avctx);
int ff_snow_common_init_after_header(AVCodecContext *avctx);
void ff_snow_common_end(SnowContext *s);
void ff_snow_release_buffer(AVCodecContext *avctx);
void ff_snow_reset_contexts(SnowContext *s);
int ff_snow_alloc_blocks(SnowContext *s);
int ff_snow_frame_start(SnowContext *s);
void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride,
                        int sx, int sy, int b_w, int b_h, const BlockNode *block,
                        int plane_index, int w, int h);
int ff_snow_get_buffer(SnowContext *s, AVFrame *frame);
/* common inline functions */
//XXX doublecheck all of them should stay inlined
static inline void snow_set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    const int block_w= 1<<rem_depth;
    BlockNode block;
    int i,j;

    block.color[0]= l;
    block.color[1]= cb;
    block.color[2]= cr;
    block.mx= mx;
    block.my= my;
    block.ref= ref;
    block.type= type;
    block.level= level;

    for(j=0; j<block_w; j++){
        for(i=0; i<block_w; i++){
            s->block[index + i + j*w]= block;
        }
    }
}

static inline void pred_mv(SnowContext *s, int *mx, int *my, int ref,
                           const BlockNode *left, const BlockNode *top, const BlockNode *tr){
    if(s->ref_frames == 1){
        *mx = mid_pred(left->mx, top->mx, tr->mx);
        *my = mid_pred(left->my, top->my, tr->my);
    }else{
        const int *scale = ff_scale_mv_ref[ref];
        *mx = mid_pred((left->mx * scale[left->ref] + 128) >>8,
                       (top ->mx * scale[top ->ref] + 128) >>8,
                       (tr  ->mx * scale[tr  ->ref] + 128) >>8);
        *my = mid_pred((left->my * scale[left->ref] + 128) >>8,
                       (top ->my * scale[top ->ref] + 128) >>8,
                       (tr  ->my * scale[tr  ->ref] + 128) >>8);
    }
}
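
/* Informal note: pred_mv() above predicts a block's motion vector as the
 * median (mid_pred) of its left, top and top-right neighbours. With more
 * than one reference frame, each neighbour's vector is first rescaled via
 * ff_scale_mv_ref[ref][neighbour_ref], which appears to be an 8.8 fixed-point
 * factor (hence the "+ 128) >> 8" rounding) compensating for the differing
 * reference-frame distances. */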
static av_always_inline int same_block(BlockNode *a, BlockNode *b){
    if((a->type&BLOCK_INTRA) && (b->type&BLOCK_INTRA)){
        return !((a->color[0] - b->color[0]) | (a->color[1] - b->color[1]) | (a->color[2] - b->color[2]));
    }else{
        return !((a->mx - b->mx) | (a->my - b->my) | (a->ref - b->ref) | ((a->type ^ b->type)&BLOCK_INTRA));
    }
}
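
/* Informal overview of add_yblock() below: each output pixel is an
 * overlapped-block motion-compensation (OBMC) blend of up to four block
 * predictions, one per neighbouring BlockNode (top-left, top-right,
 * bottom-left, bottom-right of the current window position). The four
 * weights are read from the ff_obmc_tab window and should sum to
 * OBMC_MAX = 1<<LOG2_OBMC_MAX, so the weighted sum is shifted back down to
 * FRAC_BITS precision before being added to, or subtracted from, the
 * residual. Identical neighbouring nodes share one prediction, detected via
 * same_block(). */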
//FIXME name cleanup (b_w, block_w, b_width stuff)
//XXX should we really inline it?
static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index){
    const int b_width = s->b_width  << s->block_max_depth;
    const int b_height= s->b_height << s->block_max_depth;
    const int b_stride= b_width;
    BlockNode *lt= &s->block[b_x + b_y*b_stride];
    BlockNode *rt= lt+1;
    BlockNode *lb= lt+b_stride;
    BlockNode *rb= lb+1;
    uint8_t *block[4];
    // When src_stride is large enough, it is possible to interleave the blocks.
    // Otherwise the blocks are written sequentially in the tmp buffer.
    int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
    uint8_t *tmp = s->scratchbuf;
    uint8_t *ptmp;
    int x,y;

    if(b_x<0){
        lt= rt;
        lb= rb;
    }else if(b_x + 1 >= b_width){
        rt= lt;
        rb= lb;
    }
    if(b_y<0){
        lt= lb;
        rt= rb;
    }else if(b_y + 1 >= b_height){
        lb= lt;
        rb= rt;
    }

    if(src_x<0){ //FIXME merge with prev & always round internal width up to *16
        obmc -= src_x;
        b_w += src_x;
        if(!sliced && !offset_dst)
            dst -= src_x;
        src_x=0;
    }
    if(src_x + b_w > w){
        b_w = w - src_x;
    }
    if(src_y<0){
        obmc -= src_y*obmc_stride;
        b_h += src_y;
        if(!sliced && !offset_dst)
            dst -= src_y*dst_stride;
        src_y=0;
    }
    if(src_y + b_h > h){
        b_h = h - src_y;
    }

    if(b_w<=0 || b_h<=0) return;

    if(!sliced && offset_dst)
        dst += src_x + src_y*dst_stride;
    dst8+= src_x + src_y*src_stride;
//    src += src_x + src_y*src_stride;

    ptmp= tmp + 3*tmp_step;
    block[0]= ptmp;
    ptmp+=tmp_step;
    ff_snow_pred_block(s, block[0], tmp, src_stride, src_x, src_y, b_w, b_h, lt, plane_index, w, h);

    if(same_block(lt, rt)){
        block[1]= block[0];
    }else{
        block[1]= ptmp;
        ptmp+=tmp_step;
        ff_snow_pred_block(s, block[1], tmp, src_stride, src_x, src_y, b_w, b_h, rt, plane_index, w, h);
    }

    if(same_block(lt, lb)){
        block[2]= block[0];
    }else if(same_block(rt, lb)){
        block[2]= block[1];
    }else{
        block[2]= ptmp;
        ptmp+=tmp_step;
        ff_snow_pred_block(s, block[2], tmp, src_stride, src_x, src_y, b_w, b_h, lb, plane_index, w, h);
    }

    if(same_block(lt, rb)){
        block[3]= block[0];
    }else if(same_block(rt, rb)){
        block[3]= block[1];
    }else if(same_block(lb, rb)){
        block[3]= block[2];
    }else{
        block[3]= ptmp;
        ff_snow_pred_block(s, block[3], tmp, src_stride, src_x, src_y, b_w, b_h, rb, plane_index, w, h);
    }

    if(sliced){
        s->dwt.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x, src_y, src_stride, sb, add, dst8);
    }else{
        for(y=0; y<b_h; y++){
            //FIXME ugly misuse of obmc_stride
            const uint8_t *obmc1= obmc + y*obmc_stride;
            const uint8_t *obmc2= obmc1+ (obmc_stride>>1);
            const uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
            const uint8_t *obmc4= obmc3+ (obmc_stride>>1);
            for(x=0; x<b_w; x++){
                int v=   obmc1[x] * block[3][x + y*src_stride]
                        +obmc2[x] * block[2][x + y*src_stride]
                        +obmc3[x] * block[1][x + y*src_stride]
                        +obmc4[x] * block[0][x + y*src_stride];

                v <<= 8 - LOG2_OBMC_MAX;
                if(FRAC_BITS != 8){
                    v >>= 8 - FRAC_BITS;
                }
                if(add){
                    v += dst[x + y*dst_stride];
                    v = (v + (1<<(FRAC_BITS-1))) >> FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*src_stride] = v;
                }else{
                    dst[x + y*dst_stride] -= v;
                }
            }
        }
    }
}
static av_always_inline void predict_slice(SnowContext *s, IDWTELEM *buf, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;

    av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc params assume squares

    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
                for(x=0; x<w; x++){
                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
                for(x=0; x<w; x++){
                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 0, NULL, buf, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 1, plane_index);
    }
}

static av_always_inline void predict_plane(SnowContext *s, IDWTELEM *buf, int plane_index, int add){
    const int mb_h= s->b_height << s->block_max_depth;
    int mb_y;
    for(mb_y=0; mb_y<=mb_h; mb_y++)
        predict_slice(s, buf, plane_index, add, mb_y);
}
static inline void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    const int block_w= 1<<rem_depth;
    const int block_h= 1<<rem_depth; //FIXME "w!=h"
    BlockNode block;
    int i,j;

    block.color[0]= l;
    block.color[1]= cb;
    block.color[2]= cr;
    block.mx= mx;
    block.my= my;
    block.ref= ref;
    block.type= type;
    block.level= level;

    for(j=0; j<block_h; j++){
        for(i=0; i<block_w; i++){
            s->block[index + i + j*w]= block;
        }
    }
}

static inline void init_ref(MotionEstContext *c, uint8_t *src[3], uint8_t *ref[3], uint8_t *ref2[3], int x, int y, int ref_index){
    SnowContext *s = c->avctx->priv_data;
    const int offset[3]= {
          y*c->  stride + x,
        ((y*c->uvstride + x)>>s->chroma_h_shift),
        ((y*c->uvstride + x)>>s->chroma_h_shift),
    };
    int i;
    for(i=0; i<3; i++){
        c->src[0][i]= src [i];
        c->ref[0][i]= ref [i] + offset[i];
    }
    av_assert2(!ref_index);
}
/* bitstream functions */

extern const int8_t ff_quant3bA[256];

#define QEXPSHIFT (7-FRAC_BITS+8) //FIXME try to change this to 0

static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
    int i;

    if(v){
        const int a= FFABS(v);
        const int e= av_log2(a);
        const int el= FFMIN(e, 10);
        put_rac(c, state+0, 0);

        for(i=0; i<el; i++){
            put_rac(c, state+1+i, 1);  //1..10
        }
        for(; i<e; i++){
            put_rac(c, state+1+9, 1);  //1..10
        }
        put_rac(c, state+1+FFMIN(i,9), 0);

        for(i=e-1; i>=el; i--){
            put_rac(c, state+22+9, (a>>i)&1); //22..31
        }
        for(; i>=0; i--){
            put_rac(c, state+22+i, (a>>i)&1); //22..31
        }

        if(is_signed)
            put_rac(c, state+11 + el, v < 0); //11..21
    }else{
        put_rac(c, state+0, 1);
    }
}

static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
    if(get_rac(c, state+0))
        return 0;
    else{
        int i, e, a;
        e= 0;
        while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
            e++;
        }

        a= 1;
        for(i=e-1; i>=0; i--){
            a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
        }

        e= -(is_signed && get_rac(c, state+11 + FFMIN(e,10))); //11..21
        return (a^e)-e;
    }
}
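
/* Informal worked example for put_symbol()/get_symbol() above, with
 * is_signed=1 and v=5 (so a=5, e=av_log2(5)=2, el=2):
 *   - a '0' into state[0]                    (value is non-zero)
 *   - two '1' bits into state[1], state[2]   (unary exponent, contexts 1..10)
 *   - a '0' into state[3]                    (exponent terminator)
 *   - mantissa bits (5>>1)&1=0 into state[23] and 5&1=1 into state[22]
 *   - sign bit 0 into state[11+el]=state[13] (contexts 11..21)
 * get_symbol() reads the same contexts back, rebuilds a=5 and, with a sign
 * bit of 0, returns (a^e)-e = +5. */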
static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2){
    int i;
    int r= log2>=0 ? 1<<log2 : 1;

    av_assert2(v>=0);
    av_assert2(log2>=-4);

    while(v >= r){
        put_rac(c, state+4+log2, 1);
        v -= r;
        log2++;
        if(log2>0) r+=r;
    }
    put_rac(c, state+4+log2, 0);

    for(i=log2-1; i>=0; i--){
        put_rac(c, state+31-i, (v>>i)&1);
    }
}

static inline int get_symbol2(RangeCoder *c, uint8_t *state, int log2){
    int i;
    int r= log2>=0 ? 1<<log2 : 1;
    int v=0;

    av_assert2(log2>=-4);

    while(log2<28 && get_rac(c, state+4+log2)){
        v+= r;
        log2++;
        if(log2>0) r+=r;
    }

    for(i=log2-1; i>=0; i--){
        v+= get_rac(c, state+31-i)<<i;
    }

    return v;
}
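
/* Informal note on put_symbol2()/get_symbol2() above: this is an escalating
 * Golomb-style code. Each '1' flag consumes one step of size r (and r doubles
 * once log2 becomes positive); the terminating '0' is followed by log2
 * literal bits carrying the remainder. For example, put_symbol2(c, state, 5, 0)
 * emits flags 1,1,0 into contexts 4,5,6 (steps of 1 and 2, leaving log2=2 and
 * remainder 2), then literal bits 1,0 into contexts 30,31; get_symbol2()
 * accumulates 1 + 2 + 2 = 5. */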
static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, int orientation){
    const int w= b->width;
    const int h= b->height;
    int x,y;

    int run, runs;
    x_and_coeff *xc= b->x_coeff;
    x_and_coeff *prev_xc= NULL;
    x_and_coeff *prev2_xc= xc;
    x_and_coeff *parent_xc= parent ? parent->x_coeff : NULL;
    x_and_coeff *prev_parent_xc= parent_xc;

    runs= get_symbol2(&s->c, b->state[30], 0);
    if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
    else           run= INT_MAX;

    for(y=0; y<h; y++){
        int v=0;
        int lt=0, t=0, rt=0;

        if(y && prev_xc->x == 0){
            rt= prev_xc->coeff;
        }
        for(x=0; x<w; x++){
            int p=0;
            const int l= v;

            lt= t; t= rt;

            if(y){
                if(prev_xc->x <= x)
                    prev_xc++;
                if(prev_xc->x == x + 1)
                    rt= prev_xc->coeff;
                else
                    rt=0;
            }
            if(parent_xc){
                if(x>>1 > parent_xc->x){
                    parent_xc++;
                }
                if(x>>1 == parent_xc->x){
                    p= parent_xc->coeff;
                }
            }
            if(/*ll|*/l|lt|t|rt|p){
                int context= av_log2(/*FFABS(ll) + */3*(l>>1) + (lt>>1) + (t&~1) + (rt>>1) + (p>>1));

                v=get_rac(&s->c, &b->state[0][context]);
                if(v){
                    v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
                    v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }
                    xc->x=x;
                    (xc++)->coeff= v;
                }
            }else{
                if(!run){
                    if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
                    else           run= INT_MAX;
                    v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
                    v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }

                    xc->x=x;
                    (xc++)->coeff= v;
                }else{
                    int max_run;
                    run--;
                    v=0;
                    av_assert2(run >= 0);
                    if(y) max_run= FFMIN(run, prev_xc->x - x - 2);
                    else  max_run= FFMIN(run, w-x-1);
                    if(parent_xc)
                        max_run= FFMIN(max_run, 2*parent_xc->x - x - 1);
                    av_assert2(max_run >= 0 && max_run <= run);

                    x+= max_run;
                    run-= max_run;
                }
            }
        }
        (xc++)->x= w+1; //end marker
        prev_xc= prev2_xc;
        prev2_xc= xc;

        if(parent_xc){
            if(y&1){
                while(parent_xc->x != parent->width+1)
                    parent_xc++;
                parent_xc++;
                prev_parent_xc= parent_xc;
            }else{
                parent_xc= prev_parent_xc;
            }
        }
    }

    (xc++)->x= w+1; //end marker
}

#endif /* AVCODEC_SNOW_H */