/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intmath.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "snow_dwt.h"
#include "internal.h"
#include "snow.h"
#include "rangecoder.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "h263.h"
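
/* Run OBMC prediction for one row of blocks of the given plane, adding the
 * prediction into the slice buffer (add=1) or subtracting it. For keyframes
 * (and the debug&512 path) no motion compensation is done; samples are only
 * converted between the FRAC_BITS fixed-point domain and 8-bit output.
 * When motion-vector export is enabled, the vectors of the row are also
 * appended to s->avmv. */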
static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;

    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
//                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
//                DWTELEM * line = slice_buffer_get_line(sb, y);
                IDWTELEM * line = sb->line[y];
                for(x=0; x<w; x++){
                    line[x] -= 128 << FRAC_BITS;
//                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 1, sb, old_buffer, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 0, plane_index);
    }

    if(s->avmv && mb_y < mb_h && plane_index == 0)
        for(mb_x=0; mb_x<mb_w; mb_x++){
            AVMotionVector *avmv = s->avmv + s->avmv_index;
            const int b_width = s->b_width  << s->block_max_depth;
            const int b_stride= b_width;
            BlockNode *bn= &s->block[mb_x + mb_y*b_stride];

            if (bn->type)
                continue;

            s->avmv_index++;

            avmv->w = block_w;
            avmv->h = block_h;
            avmv->dst_x = block_w*mb_x - block_w/2;
            avmv->dst_y = block_h*mb_y - block_h/2;
            avmv->motion_scale = 8;
            avmv->motion_x = bn->mx * s->mv_scale;
            avmv->motion_y = bn->my * s->mv_scale;
            avmv->src_x = avmv->dst_x + avmv->motion_x / 8;
            avmv->src_y = avmv->dst_y + avmv->motion_y / 8;
            avmv->source= -1 - bn->ref;
            avmv->flags = 0;
        }
}
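
/* Dequantize the run-length coded coefficients of one subband into the slice
 * buffer for rows [start_y, h). save_state[0] carries the read index into
 * b->x_coeff across consecutive slices. */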
static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
    const int w= b->width;
    int y;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int new_index = 0;

    if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
        qadd= 0;
        qmul= 1<<QEXPSHIFT;
    }

    /* If we are on the second or later slice, restore our index. */
    if (start_y != 0)
        new_index = save_state[0];

    for(y=start_y; y<h; y++){
        int x = 0;
        int v;
        IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
        memset(line, 0, b->width*sizeof(IDWTELEM));
        v = b->x_coeff[new_index].coeff;
        x = b->x_coeff[new_index++].x;
        while(x < w){
            register int t= (int)( (v>>1)*(unsigned)qmul + qadd)>>QEXPSHIFT;
            register int u= -(v&1);
            line[x] = (t^u) - u;

            v = b->x_coeff[new_index].coeff;
            x = b->x_coeff[new_index++].x;
        }
    }

    /* Save our variables for the next slice. */
    save_state[0] = new_index;

    return;
}
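
/* Recursively decode the block quad-tree at (x, y) on the given level: either
 * a leaf carrying an intra colour or a motion vector (both predicted from the
 * left, top and top-right neighbours), or four child branches. */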
static int decode_q_branch(SnowContext *s, int level, int x, int y){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    int trx= (x+1)<<rem_depth;
    const BlockNode *left  = x ? &s->block[index-1] : &null_block;
    const BlockNode *top   = y ? &s->block[index-w] : &null_block;
    const BlockNode *tl    = y && x ? &s->block[index-w-1] : left;
    const BlockNode *tr    = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
    int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
    int res;

    if(s->keyframe){
        set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
        return 0;
    }

    if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
        int type, mx, my;
        int l = left->color[0];
        int cb= left->color[1];
        int cr= left->color[2];
        unsigned ref = 0;
        int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
        int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
        int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));

        type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
        if(type){
            int ld, cbd, crd;
            pred_mv(s, &mx, &my, 0, left, top, tr);
            ld = get_symbol(&s->c, &s->block_state[32], 1);
            if (ld < -255 || ld > 255) {
                return AVERROR_INVALIDDATA;
            }
            l += ld;
            if (s->nb_planes > 2) {
                cbd = get_symbol(&s->c, &s->block_state[64], 1);
                crd = get_symbol(&s->c, &s->block_state[96], 1);
                if (cbd < -255 || cbd > 255 || crd < -255 || crd > 255) {
                    return AVERROR_INVALIDDATA;
                }
                cb += cbd;
                cr += crd;
            }
        }else{
            if(s->ref_frames > 1)
                ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
            if (ref >= s->ref_frames) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
                return AVERROR_INVALIDDATA;
            }
            pred_mv(s, &mx, &my, ref, left, top, tr);
            mx+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
            my+= (unsigned)get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
        }
        set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
    }else{
        if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
            (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
            return res;
    }
    return 0;
}
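
/* Apply inverse quantization (qmul/qadd) to rows [start_y, end_y) of a
 * subband stored in the slice buffer; a no-op for lossless frames. */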
static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
    const int w= b->width;
    const int qlog= av_clip(s->qlog + (int64_t)b->qlog, 0, QROOT*16);
    const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

    if(s->qlog == LOSSLESS_QLOG) return;

    for(y=start_y; y<end_y; y++){
//        DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
        for(x=0; x<w; x++){
            int i= line[x];
            if(i<0){
                line[x]= -((-i*(unsigned)qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
            }else if(i>0){
                line[x]= (( i*(unsigned)qmul + qadd)>>(QEXPSHIFT));
            }
        }
    }
}
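
/* Undo the spatial prediction applied to a subband (only used for band[0][0]
 * in this decoder): each sample is predicted from its left and upper
 * neighbours, using either a median predictor or a gradient (MED-style)
 * predictor, and the decoded residual is added back in place. */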
static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
    const int w= b->width;
    int x,y;

    IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
    IDWTELEM * prev;

    if (start_y != 0)
        line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;

    for(y=start_y; y<end_y; y++){
        prev = line;
//        line = slice_buffer_get_line_from_address(sb, src + (y * stride));
        line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;

        for(x=0; x<w; x++){
            if(x){
                if(use_median){
                    if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
                    else           line[x] += line[x - 1];
                }else{
                    if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
                    else  line[x] += line[x - 1];
                }
            }else{
                if(y) line[x] += prev[x];
            }
        }
    }
}
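
/* Read the per-band quantizer logarithms: plane 2 reuses the qlogs of
 * plane 1 and orientation 2 reuses orientation 1; everything else is read
 * from the bitstream. */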
static void decode_qlogs(SnowContext *s){
    int plane_index, level, orientation;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1:0; orientation<4; orientation++){
                int q;
                if     (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
                else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
                else                    q= get_symbol(&s->c, s->header_state, 1);
                s->plane[plane_index].band[level][orientation].qlog= q;
            }
        }
    }
}
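
/* Read a header symbol into dst and fail with AVERROR_INVALIDDATA if it does
 * not satisfy the given range check. */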
#define GET_S(dst, check) \
    tmp= get_symbol(&s->c, s->header_state, 0);\
    if(!(check)){\
        av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
        return AVERROR_INVALIDDATA;\
    }\
    dst= tmp;
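
/* Parse the frame header: keyframe flag, global keyframe-only parameters
 * (colourspace, reference frame count, decomposition settings), optional
 * per-plane motion compensation filter coefficients (htaps/hcoeff), and the
 * per-frame deltas for qlog, mv_scale, qbias and block_max_depth. */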
static int decode_header(SnowContext *s){
    int plane_index, tmp;
    uint8_t kstate[32];

    memset(kstate, MID_STATE, sizeof(kstate));

    s->keyframe= get_rac(&s->c, kstate);
    if(s->keyframe || s->always_reset){
        ff_snow_reset_contexts(s);
        s->spatial_decomposition_type=
        s->qlog=
        s->qbias=
        s->mv_scale=
        s->block_max_depth= 0;
    }
    if(s->keyframe){
        GET_S(s->version, tmp <= 0U)
        s->always_reset= get_rac(&s->c, s->header_state);
        s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
        s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
        GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
        s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
        if (s->colorspace_type == 1) {
            s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
            s->nb_planes = 1;
        } else if(s->colorspace_type == 0) {
            s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
            s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);

            if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
            }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
                s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
            } else {
                av_log(s, AV_LOG_ERROR, "unsupported color subsample mode %d %d\n", s->chroma_h_shift, s->chroma_v_shift);
                s->chroma_h_shift = s->chroma_v_shift = 1;
                s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
                return AVERROR_INVALIDDATA;
            }
            s->nb_planes = 3;
        } else {
            av_log(s, AV_LOG_ERROR, "unsupported color space\n");
            s->chroma_h_shift = s->chroma_v_shift = 1;
            s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
            return AVERROR_INVALIDDATA;
        }

        s->spatial_scalability= get_rac(&s->c, s->header_state);
//        s->rate_scalability= get_rac(&s->c, s->header_state);
        GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
        s->max_ref_frames++;

        decode_qlogs(s);
    }

    if(!s->keyframe){
        if(get_rac(&s->c, s->header_state)){
            for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
                int htaps, i, sum=0;
                Plane *p= &s->plane[plane_index];
                p->diag_mc= get_rac(&s->c, s->header_state);
                htaps= get_symbol(&s->c, s->header_state, 0);
                if((unsigned)htaps >= HTAPS_MAX/2 - 1)
                    return AVERROR_INVALIDDATA;
                htaps = htaps*2 + 2;
                p->htaps= htaps;
                for(i= htaps/2; i; i--){
                    unsigned hcoeff = get_symbol(&s->c, s->header_state, 0);
                    if (hcoeff > 127)
                        return AVERROR_INVALIDDATA;
                    p->hcoeff[i]= hcoeff * (1-2*(i&1));
                    sum += p->hcoeff[i];
                }
                p->hcoeff[0]= 32-sum;
            }
            s->plane[2].diag_mc= s->plane[1].diag_mc;
            s->plane[2].htaps  = s->plane[1].htaps;
            memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
        }
        if(get_rac(&s->c, s->header_state)){
            GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
            decode_qlogs(s);
        }
    }

    s->spatial_decomposition_type+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->spatial_decomposition_type > 1U){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
        return AVERROR_INVALIDDATA;
    }
    if(FFMIN(s->avctx-> width>>s->chroma_h_shift,
             s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
        av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
        return AVERROR_INVALIDDATA;
    }
    if (s->avctx->width > 65536-4) {
        av_log(s->avctx, AV_LOG_ERROR, "Width %d is too large\n", s->avctx->width);
        return AVERROR_INVALIDDATA;
    }

    s->qlog           += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->mv_scale       += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->qbias          += (unsigned)get_symbol(&s->c, s->header_state, 1);
    s->block_max_depth+= (unsigned)get_symbol(&s->c, s->header_state, 1);
    if(s->block_max_depth > 1 || s->block_max_depth < 0 || s->mv_scale > 256U){
        av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
        s->block_max_depth= 0;
        s->mv_scale = 0;
        return AVERROR_INVALIDDATA;
    }
    if (FFABS(s->qbias) > 127) {
        av_log(s->avctx, AV_LOG_ERROR, "qbias %d is too large\n", s->qbias);
        s->qbias = 0;
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
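
/* Decode the block tree for every top-level block of the frame, checking that
 * the range coder has not run past the end of the packet. */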
static int decode_blocks(SnowContext *s){
    int x, y;
    int w= s->b_width;
    int h= s->b_height;
    int res;

    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            if (s->c.bytestream >= s->c.bytestream_end)
                return AVERROR_INVALIDDATA;
            if ((res = decode_q_branch(s, 0, x, y)) < 0)
                return res;
        }
    }
    return 0;
}
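
/* Main decode callback: parse the header, decode the block tree, then
 * reconstruct each plane slice by slice (subband decoding, inverse wavelet
 * transform and OBMC prediction), and optionally export the decoded motion
 * vectors as AV_FRAME_DATA_MOTION_VECTORS side data. */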
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    SnowContext *s = avctx->priv_data;
    RangeCoder * const c= &s->c;
    int bytes_read;
    AVFrame *picture = data;
    int level, orientation, plane_index;
    int res;

    ff_init_range_decoder(c, buf, buf_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
    if ((res = decode_header(s)) < 0)
        return res;
    if ((res=ff_snow_common_init_after_header(avctx)) < 0)
        return res;

    // realloc slice buffer for the case that spatial_decomposition_count changed
    ff_slice_buffer_destroy(&s->sb);
    if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
                                    (MB_SIZE >> s->block_max_depth) +
                                    s->spatial_decomposition_count * 11 + 1,
                                    s->plane[0].width,
                                    s->spatial_idwt_buffer)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
                                              && p->hcoeff[1]==-10
                                              && p->hcoeff[2]==2;
    }

    ff_snow_alloc_blocks(s);

    if((res = ff_snow_frame_start(s)) < 0)
        return res;

    s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    //keyframe flag duplication mess FIXME
    if(avctx->debug&FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR,
               "keyframe:%d qlog:%d qbias: %d mvscale: %d "
               "decomposition_type:%d decomposition_count:%d\n",
               s->keyframe, s->qlog, s->qbias, s->mv_scale,
               s->spatial_decomposition_type,
               s->spatial_decomposition_count
              );

    av_assert0(!s->avmv);
    if (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS) {
        s->avmv = av_malloc_array(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2));
    }
    s->avmv_index = 0;

    if ((res = decode_blocks(s)) < 0)
        return res;

    for(plane_index=0; plane_index < s->nb_planes; plane_index++){
        Plane *p= &s->plane[plane_index];
        int w= p->width;
        int h= p->height;
        int x, y;
        int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */

        if(s->avctx->debug&2048){
            memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
            predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);

            for(y=0; y<h; y++){
                for(x=0; x<w; x++){
                    int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
                    s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
                }
            }
        }

        for(level=0; level<s->spatial_decomposition_count; level++){
            for(orientation=level ? 1 : 0; orientation<4; orientation++){
                SubBand *b= &p->band[level][orientation];
                unpack_coeffs(s, b, b->parent, orientation);
            }
        }

        {
            const int mb_h= s->b_height << s->block_max_depth;
            const int block_size = MB_SIZE >> s->block_max_depth;
            const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
            int mb_y;
            DWTCompose cs[MAX_DECOMPOSITIONS];
            int yd=0, yq=0;
            int y;
            int end_y;

            ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
            for(mb_y=0; mb_y<=mb_h; mb_y++){

                int slice_starty = block_h*mb_y;
                int slice_h = block_h*(mb_y+1);

                if (!(s->keyframe || s->avctx->debug&512)){
                    slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
                    slice_h -= (block_h >> 1);
                }

                for(level=0; level<s->spatial_decomposition_count; level++){
                    for(orientation=level ? 1 : 0; orientation<4; orientation++){
                        SubBand *b= &p->band[level][orientation];
                        int start_y;
                        int end_y;
                        int our_mb_start = mb_y;
                        int our_mb_end = (mb_y + 1);
                        const int extra= 3;
                        start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
                        end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
                        if (!(s->keyframe || s->avctx->debug&512)){
                            start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                            end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
                        }
                        start_y = FFMIN(b->height, start_y);
                        end_y = FFMIN(b->height, end_y);
                        if (start_y != end_y){
                            if (orientation == 0){
                                SubBand * correlate_band = &p->band[0][0];
                                int correlate_end_y = FFMIN(b->height, end_y + 1);
                                int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
                                decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
                                correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
                                dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
                            }
                            else
                                decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
                        }
                    }
                }

                for(; yd<slice_h; yd+=4){
                    ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
                }

                if(s->qlog == LOSSLESS_QLOG){
                    for(; yq<slice_h && yq<h; yq++){
                        IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
                        for(x=0; x<w; x++){
                            line[x] *= 1<<FRAC_BITS;
                        }
                    }
                }

                predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);

                y = FFMIN(p->height, slice_starty);
                end_y = FFMIN(p->height, slice_h);
                while(y < end_y)
                    ff_slice_buffer_release(&s->sb, y++);
            }

            ff_slice_buffer_flush(&s->sb);
        }

    }

    emms_c();

    ff_snow_release_buffer(avctx);

    if(!(s->avctx->debug&2048))
        res = av_frame_ref(picture, s->current_picture);
    else
        res = av_frame_ref(picture, s->mconly_picture);

    if (res >= 0 && s->avmv_index) {
        AVFrameSideData *sd;

        sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
        if (!sd)
            return AVERROR(ENOMEM);
        memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
    }

    av_freep(&s->avmv);

    if (res < 0)
        return res;

    *got_frame = 1;

    bytes_read= c->bytestream - c->bytestream_start;
    if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME

    return bytes_read;
}
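
/* Free the slice buffer and the shared Snow decoder state on close. */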
static av_cold int decode_end(AVCodecContext *avctx)
{
    SnowContext *s = avctx->priv_data;

    ff_slice_buffer_destroy(&s->sb);

    ff_snow_common_end(s);

    return 0;
}

AVCodec ff_snow_decoder = {
    .name           = "snow",
    .long_name      = NULL_IF_CONFIG_SMALL("Snow"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SNOW,
    .priv_data_size = sizeof(SnowContext),
    .init           = ff_snow_common_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};