/*
 * Error resilience / concealment
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Error resilience / concealment.
 */

#include <limits.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"

/*
 * H264 redefines mb_intra so it is not mistakenly used (it is uninitialized in h264)
 * but error concealment must support both h264 and h263 thus we must undo this
 */
#undef mb_intra

static void decode_mb(MpegEncContext *s, int ref){
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y *  16 * s->linesize)                     + s->mb_x *  16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
        H264Context *h= (void*)s;
        h->mb_xy= s->mb_x + s->mb_y*s->mb_stride;
        memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
        assert(ref>=0);
        if(ref >= h->ref_count[0]) //FIXME it is possible albeit uncommon that slice references differ between slices, we take the easy approach and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
            ref=0;
        fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
        fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
        assert(!FRAME_MBAFF);
        ff_h264_hl_decode_mb(h);
    }else{
        assert(ref==0);
        MPV_decode_mb(s, s->block);
    }
}

/**
 * @param stride the number of MVs to get to the next row
 * @param mv_step the number of MVs per row or column in a macroblock
 */
static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride){
    if(s->codec_id == CODEC_ID_H264){
        H264Context *h= (void*)s;
        assert(s->quarter_sample);
        *mv_step= 4;
        *stride= h->b_stride;
    }else{
        *mv_step= 2;
        *stride= s->b8_stride;
    }
}

/**
 * Replace the current MB with a flat dc-only version.
 */
static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
{
    int dc, dcu, dcv, y, i;
    for(i=0; i<4; i++){
        dc= s->dc_val[0][mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride];
        if(dc<0) dc=0;
        else if(dc>2040) dc=2040;
        for(y=0; y<8; y++){
            int x;
            for(x=0; x<8; x++){
                dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
            }
        }
    }
    dcu = s->dc_val[1][mb_x + mb_y*s->mb_stride];
    dcv = s->dc_val[2][mb_x + mb_y*s->mb_stride];
    if     (dcu<0   ) dcu=0;
    else if(dcu>2040) dcu=2040;
    if     (dcv<0   ) dcv=0;
    else if(dcv>2040) dcv=2040;
    for(y=0; y<8; y++){
        int x;
        for(x=0; x<8; x++){
            dest_cb[x + y*(s->uvlinesize)]= dcu/8;
            dest_cr[x + y*(s->uvlinesize)]= dcv/8;
        }
    }
}

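/*
 * filter181() smooths the per-block DC values with a [-1 8 -1] kernel,
 * first along the rows and then along the columns. The constant 10923 is
 * roughly 65536/6, so (dc*10923 + 32768)>>16 divides the
 * -prev + 8*cur - next sum by the kernel weight sum (6) with rounding,
 * mildly pulling each DC towards its two neighbours.
 */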
static void filter181(int16_t *data, int width, int height, int stride){
    int x,y;

    /* horizontal filter */
    for(y=1; y<height-1; y++){
        int prev_dc= data[0 + y*stride];

        for(x=1; x<width-1; x++){
            int dc;

            dc= - prev_dc
                + data[x     + y*stride]*8
                - data[x + 1 + y*stride];
            dc= (dc*10923 + 32768)>>16;
            prev_dc= data[x + y*stride];
            data[x + y*stride]= dc;
        }
    }

    /* vertical filter */
    for(x=1; x<width-1; x++){
        int prev_dc= data[x];

        for(y=1; y<height-1; y++){
            int dc;

            dc= - prev_dc
                + data[x +  y   *stride]*8
                - data[x + (y+1)*stride];
            dc= (dc*10923 + 32768)>>16;
            prev_dc= data[x + y*stride];
            data[x + y*stride]= dc;
        }
    }
}

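/*
 * guess_dc() scans each block row and column in both directions: for every
 * block it records the nearest known-good DC value and its distance to the
 * left, right, above and below. A damaged intra block then receives the
 * weighted mean of those four candidates, with each weight inversely
 * proportional to the distance (256*256*256*16 / dist), so closer
 * neighbours dominate the guess.
 */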
/**
 * guess the dc of blocks which do not have an undamaged dc
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
    int b_x, b_y;
    int16_t  (*col )[4] = av_malloc(stride*h*sizeof( int16_t)*4);
    uint16_t (*dist)[4] = av_malloc(stride*h*sizeof(uint16_t)*4);

    for(b_y=0; b_y<h; b_y++){
        int color= 1024;
        int distance= -1;
        for(b_x=0; b_x<w; b_x++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][1]= color;
            dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_x=w-1; b_x>=0; b_x--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][0]= color;
            dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
        }
    }
    for(b_x=0; b_x<w; b_x++){
        int color= 1024;
        int distance= -1;
        for(b_y=0; b_y<h; b_y++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][3]= color;
            dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_y=h-1; b_y>=0; b_y--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][2]= color;
            dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
        }
    }

    for(b_y=0; b_y<h; b_y++){
        for(b_x=0; b_x<w; b_x++){
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            error= s->error_status_table[mb_index];

            if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
            if(!(error&ER_DC_ERROR)) continue;                             //dc-ok

            weight_sum=0;
            guess=0;
            for(j=0; j<4; j++){
                int64_t weight= 256*256*256*16/dist[b_x + b_y*stride][j];
                guess+= weight*(int64_t)col[b_x + b_y*stride][j];
                weight_sum+= weight;
            }
            guess= (guess + weight_sum/2) / weight_sum;
            dc[b_x + b_y*stride]= guess;
        }
    }

    av_freep(&col);
    av_freep(&dist);
}

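/*
 * h_block_filter() and v_block_filter() are run only on 8x8 block boundaries
 * where at least one side is damaged. They take the pixel step b across the
 * boundary, subtract the average of the inner gradients a and c, and if
 * anything remains (d) distribute it back into the damaged side(s) with
 * weights 7/16, 5/16, 3/16 and 1/16, clipping via ff_cropTbl. When only one
 * side is damaged, d is scaled by 16/9 before being applied.
 */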
/**
 * simple horizontal deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for(b_y=0; b_y<h; b_y++){
        for(b_x=0; b_x<w-1; b_x++){
            int y;
            int left_status = s->error_status_table[( b_x   >>is_luma) + (b_y>>is_luma)*s->mb_stride];
            int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
            int left_intra  = IS_INTRA(s->current_picture.f.mb_type[( b_x     >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int left_damage =  left_status&ER_MB_ERROR;
            int right_damage= right_status&ER_MB_ERROR;
            int offset= b_x*8 + b_y*stride*8;
            int16_t *left_mv=  s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x   ];
            int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
            if(!(left_damage||right_damage)) continue; // both undamaged
            if(   (!left_intra) && (!right_intra)
               && FFABS(left_mv[0]-right_mv[0]) + FFABS(left_mv[1]+right_mv[1]) < 2) continue;

            for(y=0; y<8; y++){
                int a,b,c,d;

                a= dst[offset + 7 + y*stride] - dst[offset + 6 + y*stride];
                b= dst[offset + 8 + y*stride] - dst[offset + 7 + y*stride];
                c= dst[offset + 9 + y*stride] - dst[offset + 8 + y*stride];

                d= FFABS(b) - ((FFABS(a) + FFABS(c) + 1)>>1);
                d= FFMAX(d, 0);
                if(b<0) d= -d;

                if(d==0) continue;

                if(!(left_damage && right_damage))
                    d= d*16/9;

                if(left_damage){
                    dst[offset + 7 + y*stride] = cm[dst[offset + 7 + y*stride] + ((d*7)>>4)];
                    dst[offset + 6 + y*stride] = cm[dst[offset + 6 + y*stride] + ((d*5)>>4)];
                    dst[offset + 5 + y*stride] = cm[dst[offset + 5 + y*stride] + ((d*3)>>4)];
                    dst[offset + 4 + y*stride] = cm[dst[offset + 4 + y*stride] + ((d*1)>>4)];
                }
                if(right_damage){
                    dst[offset + 8 + y*stride] = cm[dst[offset +  8 + y*stride] - ((d*7)>>4)];
                    dst[offset + 9 + y*stride] = cm[dst[offset +  9 + y*stride] - ((d*5)>>4)];
                    dst[offset + 10+ y*stride] = cm[dst[offset + 10 + y*stride] - ((d*3)>>4)];
                    dst[offset + 11+ y*stride] = cm[dst[offset + 11 + y*stride] - ((d*1)>>4)];
                }
            }
        }
    }
}

/**
 * simple vertical deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for(b_y=0; b_y<h-1; b_y++){
        for(b_x=0; b_x<w; b_x++){
            int x;
            int top_status   = s->error_status_table[(b_x>>is_luma) + ( b_y   >>is_luma)*s->mb_stride];
            int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
            int top_intra    = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
            int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
            int top_damage   =    top_status&ER_MB_ERROR;
            int bottom_damage= bottom_status&ER_MB_ERROR;
            int offset= b_x*8 + b_y*stride*8;
            int16_t *top_mv    = s->current_picture.f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
            if(!(top_damage||bottom_damage)) continue; // both undamaged
            if(   (!top_intra) && (!bottom_intra)
               && FFABS(top_mv[0]-bottom_mv[0]) + FFABS(top_mv[1]+bottom_mv[1]) < 2) continue;

            for(x=0; x<8; x++){
                int a,b,c,d;

                a= dst[offset + x + 7*stride] - dst[offset + x + 6*stride];
                b= dst[offset + x + 8*stride] - dst[offset + x + 7*stride];
                c= dst[offset + x + 9*stride] - dst[offset + x + 8*stride];

                d= FFABS(b) - ((FFABS(a) + FFABS(c)+1)>>1);
                d= FFMAX(d, 0);
                if(b<0) d= -d;

                if(d==0) continue;

                if(!(top_damage && bottom_damage))
                    d= d*16/9;

                if(top_damage){
                    dst[offset + x +  7*stride] = cm[dst[offset + x +  7*stride] + ((d*7)>>4)];
                    dst[offset + x +  6*stride] = cm[dst[offset + x +  6*stride] + ((d*5)>>4)];
                    dst[offset + x +  5*stride] = cm[dst[offset + x +  5*stride] + ((d*3)>>4)];
                    dst[offset + x +  4*stride] = cm[dst[offset + x +  4*stride] + ((d*1)>>4)];
                }
                if(bottom_damage){
                    dst[offset + x +  8*stride] = cm[dst[offset + x +  8*stride] - ((d*7)>>4)];
                    dst[offset + x +  9*stride] = cm[dst[offset + x +  9*stride] - ((d*5)>>4)];
                    dst[offset + x + 10*stride] = cm[dst[offset + x + 10*stride] - ((d*3)>>4)];
                    dst[offset + x + 11*stride] = cm[dst[offset + x + 11*stride] - ((d*1)>>4)];
                }
            }
        }
    }
}

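/*
 * guess_mv() first marks every MB whose MV is intact (or which is intra) as
 * MV_FROZEN and copies the previous picture's MVs into the remaining ones.
 * If MV guessing is disabled or too few MBs are usable, damaged MBs are
 * simply concealed with a zero MV. Otherwise it iterates in checkerboard
 * passes: for each damaged MB it gathers candidate MVs from the fixed
 * neighbours plus their mean, median, the zero MV and the previous MV,
 * decodes the MB with each candidate, and keeps the one whose borders best
 * match the already reconstructed neighbours (minimal sum of absolute
 * boundary differences).
 */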
static void guess_mv(MpegEncContext *s){
    uint8_t *fixed = av_malloc(s->mb_stride * s->mb_height);
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1
    const int mb_stride = s->mb_stride;
    const int mb_width = s->mb_width;
    const int mb_height= s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);

    num_avail=0;
    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[ i ];
        int f=0;
        int error= s->error_status_table[mb_xy];

        if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
        if(!(error&ER_MV_ERROR)) f=MV_FROZEN;           //inter with undamaged MV

        fixed[mb_xy]= f;
        if(f==MV_FROZEN)
            num_avail++;
        else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){
            const int mb_y= mb_xy / s->mb_stride;
            const int mb_x= mb_xy % s->mb_stride;
            const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
            s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0];
            s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1];
            s->current_picture.f.ref_index[0][4*mb_xy]      = s->last_picture.f.ref_index[0][4*mb_xy];
        }
    }

    if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_xy= mb_x + mb_y*s->mb_stride;

                if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
                if(!(s->error_status_table[mb_xy]&ER_MV_ERROR)) continue;

                s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
                s->mb_intra=0;
                s->mv_type = MV_TYPE_16X16;
                s->mb_skipped=0;

                s->dsp.clear_blocks(s->block[0]);

                s->mb_x= mb_x;
                s->mb_y= mb_y;
                s->mv[0][0][0]= 0;
                s->mv[0][0][1]= 0;
                decode_mb(s, 0);
            }
        }
        goto end;
    }

    for(depth=0;; depth++){
        int changed, pass, none_left;

        none_left=1;
        changed=1;
        for(pass=0; (changed || pass<2) && pass<10; pass++){
            int mb_x, mb_y;
            int score_sum=0;

            changed=0;
            for(mb_y=0; mb_y<s->mb_height; mb_y++){
                for(mb_x=0; mb_x<s->mb_width; mb_x++){
                    const int mb_xy= mb_x + mb_y*s->mb_stride;
                    int mv_predictor[8][2]={{0}};
                    int ref[8]={0};
                    int pred_count=0;
                    int j;
                    int best_score=256*256*256*64;
                    int best_pred=0;
                    const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
                    int prev_x, prev_y, prev_ref;

                    if((mb_x^mb_y^pass)&1) continue;

                    if(fixed[mb_xy]==MV_FROZEN) continue;
                    assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
                    assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);

                    j=0;
                    if(mb_x>0           && fixed[mb_xy-1        ]==MV_FROZEN) j=1;
                    if(mb_x+1<mb_width  && fixed[mb_xy+1        ]==MV_FROZEN) j=1;
                    if(mb_y>0           && fixed[mb_xy-mb_stride]==MV_FROZEN) j=1;
                    if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_FROZEN) j=1;
                    if(j==0) continue;

                    j=0;
                    if(mb_x>0           && fixed[mb_xy-1        ]==MV_CHANGED) j=1;
                    if(mb_x+1<mb_width  && fixed[mb_xy+1        ]==MV_CHANGED) j=1;
                    if(mb_y>0           && fixed[mb_xy-mb_stride]==MV_CHANGED) j=1;
                    if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_CHANGED) j=1;
                    if(j==0 && pass>1) continue;

                    none_left=0;

                    if(mb_x>0 && fixed[mb_xy-1]){
                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
                        pred_count++;
                    }
                    if(mb_x+1<mb_width && fixed[mb_xy+1]){
                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
                        pred_count++;
                    }
                    if(mb_y>0 && fixed[mb_xy-mb_stride]){
                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
                        pred_count++;
                    }
                    if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
                        pred_count++;
                    }
                    if(pred_count==0) continue;

                    if(pred_count>1){
                        int sum_x=0, sum_y=0, sum_r=0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for(j=0; j<pred_count; j++){
                            sum_x+= mv_predictor[j][0];
                            sum_y+= mv_predictor[j][1];
                            sum_r+= ref[j];
                            if(j && ref[j] != ref[j-1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x/j;
                        mv_predictor[pred_count][1] = sum_y/j;
                        ref         [pred_count]    = sum_r/j;

                        /* median */
                        if(pred_count>=3){
                            min_y= min_x= min_r= 99999;
                            max_y= max_x= max_r=-99999;
                        }else{
                            min_x=min_y=max_x=max_y=min_r=max_r=0;
                        }
                        for(j=0; j<pred_count; j++){
                            max_x= FFMAX(max_x, mv_predictor[j][0]);
                            max_y= FFMAX(max_y, mv_predictor[j][1]);
                            max_r= FFMAX(max_r, ref[j]);
                            min_x= FFMIN(min_x, mv_predictor[j][0]);
                            min_y= FFMIN(min_y, mv_predictor[j][1]);
                            min_r= FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count+1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count+1][1] = sum_y - max_y - min_y;
                        ref         [pred_count+1]    = sum_r - max_r - min_r;

                        if(pred_count==4){
                            mv_predictor[pred_count+1][0] /= 2;
                            mv_predictor[pred_count+1][1] /= 2;
                            ref         [pred_count+1]    /= 2;
                        }
                        pred_count+=2;
                    }
skip_mean_and_median:
                    /* zero MV */
                    pred_count++;

                    if (!fixed[mb_xy] && 0) {
                        if (s->avctx->codec_id == CODEC_ID_H264) {
                            // FIXME
                        } else {
                            ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
                                                     mb_y, 0);
                        }
                        if (!s->last_picture.f.motion_val[0] ||
                            !s->last_picture.f.ref_index[0])
                            goto skip_last_mv;
                        prev_x = s->last_picture.f.motion_val[0][mot_index][0];
                        prev_y = s->last_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
                    } else {
                        prev_x = s->current_picture.f.motion_val[0][mot_index][0];
                        prev_y = s->current_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
                    }

                    /* last MV */
                    mv_predictor[pred_count][0]= prev_x;
                    mv_predictor[pred_count][1]= prev_y;
                    ref         [pred_count]   = prev_ref;
                    pred_count++;
skip_last_mv:

                    s->mv_dir = MV_DIR_FORWARD;
                    s->mb_intra=0;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_skipped=0;

                    s->dsp.clear_blocks(s->block[0]);

                    s->mb_x= mb_x;
                    s->mb_y= mb_y;

                    for(j=0; j<pred_count; j++){
                        int score=0;
                        uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;

                        s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
                        s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];

                        if(ref[j]<0) //predictor intra or otherwise not available
                            continue;

                        decode_mb(s, ref[j]);

                        if(mb_x>0 && fixed[mb_xy-1]){
                            int k;
                            for(k=0; k<16; k++)
                                score += FFABS(src[k*s->linesize-1 ]-src[k*s->linesize   ]);
                        }
                        if(mb_x+1<mb_width && fixed[mb_xy+1]){
                            int k;
                            for(k=0; k<16; k++)
                                score += FFABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
                        }
                        if(mb_y>0 && fixed[mb_xy-mb_stride]){
                            int k;
                            for(k=0; k<16; k++)
                                score += FFABS(src[k-s->linesize   ]-src[k               ]);
                        }
                        if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
                            int k;
                            for(k=0; k<16; k++)
                                score += FFABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
                        }

                        if(score <= best_score){ // <= will favor the last MV
                            best_score= score;
                            best_pred= j;
                        }
                    }
                    score_sum+= best_score;
                    s->mv[0][0][0]= mv_predictor[best_pred][0];
                    s->mv[0][0][1]= mv_predictor[best_pred][1];

                    for(i=0; i<mot_step; i++)
                        for(j=0; j<mot_step; j++){
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    decode_mb(s, ref[best_pred]);

                    if(s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y){
                        fixed[mb_xy]=MV_CHANGED;
                        changed++;
                    }else
                        fixed[mb_xy]=MV_UNCHANGED;
                }
            }

//            printf(".%d/%d", changed, score_sum); fflush(stdout);
        }

        if(none_left)
            goto end;

        for(i=0; i<s->mb_num; i++){
            int mb_xy= s->mb_index2xy[i];
            if(fixed[mb_xy])
                fixed[mb_xy]=MV_FROZEN;
        }
//        printf(":"); fflush(stdout);
    }
end:
    av_free(fixed);
}

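/*
 * is_intra_more_likely() decides whether concealment should guess spatially
 * (intra) or temporally (inter). Without a usable previous picture it picks
 * intra; if almost every MB is damaged it picks inter. Otherwise, on intra
 * pictures it samples up to roughly 50 undamaged MBs and compares the SAD
 * between the current reconstruction and the co-located MB in the last
 * picture against the last picture's own local activity; on other picture
 * types it simply lets each undamaged MB vote with its coded type.
 */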
static int is_intra_more_likely(MpegEncContext *s){
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction

    undamaged_count=0;
    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[i];
        const int error= s->error_status_table[mb_xy];
        if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
            undamaged_count++;
    }

    if(s->codec_id == CODEC_ID_H264){
        H264Context *h= (void*)s;
        if (h->list_count <= 0 || h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
            return 1;
    }

    if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction

    //prevent the dsp.sad() check below, which requires access to the image
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
        return 1;

    skip_amount= FFMAX(undamaged_count/50, 1); //check only up to 50 MBs
    is_intra_likely=0;

    j=0;
    for(mb_y= 0; mb_y<s->mb_height-1; mb_y++){
        for(mb_x= 0; mb_x<s->mb_width; mb_x++){
            int error;
            const int mb_xy= mb_x + mb_y*s->mb_stride;

            error= s->error_status_table[mb_xy];
            if((error&ER_DC_ERROR) && (error&ER_MV_ERROR))
                continue; //skip damaged

            j++;
            if((j%skip_amount) != 0) continue; //skip a few to speed things up

            if(s->pict_type==AV_PICTURE_TYPE_I){
                uint8_t *mb_ptr     = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
                uint8_t *last_mb_ptr= s->last_picture.f.data   [0] + mb_x*16 + mb_y*16*s->linesize;

                if (s->avctx->codec_id == CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
                                             mb_y, 0);
                }
                is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr                    , s->linesize, 16);
                // FIXME need await_progress() here
                is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
            }else{
                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                   is_intra_likely++;
                else
                   is_intra_likely--;
            }
        }
    }
//printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
    return is_intra_likely > 0;
}

void ff_er_frame_start(MpegEncContext *s){
    if(!s->err_recognition) return;

    memset(s->error_status_table, ER_MB_ERROR|VP_START|ER_MB_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
    s->error_count= 3*s->mb_num;
    s->error_occurred = 0;
}

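/*
 * Bookkeeping used by the functions below: each macroblock has one byte in
 * error_status_table combining the AC/DC/MV error and end flags plus
 * VP_START (first MB after a resync marker). error_count starts at 3*mb_num
 * (three error classes tracked per MB) and ff_er_add_slice() subtracts the
 * MBs of every class a slice reports as finished, so ff_er_frame_end() can
 * tell how much of the frame was actually covered by decoded slices.
 */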
/**
 * Add a slice.
 * @param endx x component of the last macroblock, can be -1 for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is assumed that no earlier end or
 *               error of the same type occurred
 */
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
    const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num-1);
    const int end_i   = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy= s->mb_index2xy[start_i];
    const int end_xy  = s->mb_index2xy[end_i];
    int mask= -1;

    if(s->avctx->hwaccel)
        return;

    if(start_i > end_i || start_xy > end_xy){
        av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n");
        return;
    }

    if(!s->err_recognition) return;

    mask &= ~VP_START;
    if(status & (ER_AC_ERROR|ER_AC_END)){
        mask &= ~(ER_AC_ERROR|ER_AC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if(status & (ER_DC_ERROR|ER_DC_END)){
        mask &= ~(ER_DC_ERROR|ER_DC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if(status & (ER_MV_ERROR|ER_MV_END)){
        mask &= ~(ER_MV_ERROR|ER_MV_END);
        s->error_count -= end_i - start_i + 1;
    }

    if(status & ER_MB_ERROR) {
        s->error_occurred = 1;
        s->error_count= INT_MAX;
    }

    if(mask == ~0x7F){
        memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
    }else{
        int i;
        for(i=start_xy; i<end_xy; i++){
            s->error_status_table[ i ] &= mask;
        }
    }

    if(end_i == s->mb_num)
        s->error_count= INT_MAX;
    else{
        s->error_status_table[end_xy] &= mask;
        s->error_status_table[end_xy] |= status;
    }

    s->error_status_table[start_xy] |= VP_START;

    if(start_xy > 0 && s->avctx->thread_count <= 1 && s->avctx->skip_top*s->mb_width < start_i){
        int prev_status= s->error_status_table[ s->mb_index2xy[start_i - 1] ];

        prev_status &= ~ VP_START;
        if(prev_status != (ER_MV_END|ER_DC_END|ER_AC_END)) s->error_count= INT_MAX;
    }
}

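/*
 * ff_er_frame_end() performs the actual concealment once all slices of a
 * frame have been added: it first fixes up error_status_table (overlapping
 * slices, partitions of different length, missing slices, forward/backward
 * error marking), then chooses intra vs. inter concealment via
 * is_intra_more_likely(), reconstructs inter blocks from their (possibly
 * guessed) MVs, fills and smooths DC values for intra blocks, and finally
 * deblocks the result if FF_EC_DEBLOCK is enabled.
 */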
void ff_er_frame_end(MpegEncContext *s){
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4]= {100,100,100};
    int threshold= 50;
    int is_intra_likely;
    int size = s->b8_stride * 2 * s->mb_height;
    Picture *pic= s->current_picture_ptr;

    if(!s->err_recognition || s->error_count==0 || s->avctx->lowres ||
       s->avctx->hwaccel ||
       s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
       s->picture_structure != PICT_FRAME || // we do not support ER of field pictures yet, though it should not crash if enabled
       s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;

    if (s->current_picture.f.motion_val[0] == NULL) {
        av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");

        for(i=0; i<2; i++){
            pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
            pic->motion_val_base[i] = av_mallocz((size+4) * 2 * sizeof(uint16_t));
            pic->f.motion_val[i]    = pic->motion_val_base[i] + 4;
        }
        pic->f.motion_subsample_log2 = 3;
        s->current_picture= *s->current_picture_ptr;
    }

    if(s->avctx->debug&FF_DEBUG_ER){
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                int status= s->error_status_table[mb_x + mb_y*s->mb_stride];

                av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

#if 1
    /* handle overlapping slices */
    for(error_type=1; error_type<=3; error_type++){
        int end_ok=0;

        for(i=s->mb_num-1; i>=0; i--){
            const int mb_xy= s->mb_index2xy[i];
            int error= s->error_status_table[mb_xy];

            if(error&(1<<error_type))
                end_ok=1;
            if(error&(8<<error_type))
                end_ok=1;

            if(!end_ok)
                s->error_status_table[mb_xy]|= 1<<error_type;

            if(error&VP_START)
                end_ok=0;
        }
    }
#endif
#if 1
    /* handle slices with partitions of different length */
    if(s->partitioned_frame){
        int end_ok=0;

        for(i=s->mb_num-1; i>=0; i--){
            const int mb_xy= s->mb_index2xy[i];
            int error= s->error_status_table[mb_xy];

            if(error&ER_AC_END)
                end_ok=0;
            if((error&ER_MV_END) || (error&ER_DC_END) || (error&ER_AC_ERROR))
                end_ok=1;

            if(!end_ok)
                s->error_status_table[mb_xy]|= ER_AC_ERROR;

            if(error&VP_START)
                end_ok=0;
        }
    }
#endif
    /* handle missing slices */
    if(s->err_recognition&AV_EF_EXPLODE){
        int end_ok=1;

        for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
            const int mb_xy= s->mb_index2xy[i];
            int error1= s->error_status_table[mb_xy  ];
            int error2= s->error_status_table[s->mb_index2xy[i+1]];

            if(error1&VP_START)
                end_ok=1;

            if(   error2==(VP_START|ER_MB_ERROR|ER_MB_END)
               && error1!=(VP_START|ER_MB_ERROR|ER_MB_END)
               && ((error1&ER_AC_END) || (error1&ER_DC_END) || (error1&ER_MV_END))){ //end & uninit
                end_ok=0;
            }

            if(!end_ok)
                s->error_status_table[mb_xy]|= ER_MB_ERROR;
        }
    }

#if 1
    /* backward mark errors */
    distance=9999999;
    for(error_type=1; error_type<=3; error_type++){
        for(i=s->mb_num-1; i>=0; i--){
            const int mb_xy= s->mb_index2xy[i];
            int error= s->error_status_table[mb_xy];

            if(!s->mbskip_table[mb_xy]) //FIXME partition specific
                distance++;
            if(error&(1<<error_type))
                distance= 0;

            if(s->partitioned_frame){
                if(distance < threshold_part[error_type-1])
                    s->error_status_table[mb_xy]|= 1<<error_type;
            }else{
                if(distance < threshold)
                    s->error_status_table[mb_xy]|= 1<<error_type;
            }

            if(error&VP_START)
                distance= 9999999;
        }
    }
#endif

    /* forward mark errors */
    error=0;
    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[i];
        int old_error= s->error_status_table[mb_xy];

        if(old_error&VP_START)
            error= old_error& ER_MB_ERROR;
        else{
            error|= old_error& ER_MB_ERROR;
            s->error_status_table[mb_xy]|= error;
        }
    }
#if 1
    /* handle not partitioned case */
    if(!s->partitioned_frame){
        for(i=0; i<s->mb_num; i++){
            const int mb_xy= s->mb_index2xy[i];
            error= s->error_status_table[mb_xy];
            if(error&ER_MB_ERROR)
                error|= ER_MB_ERROR;
            s->error_status_table[mb_xy]= error;
        }
    }
#endif

    dc_error= ac_error= mv_error=0;
    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[i];
        error= s->error_status_table[mb_xy];
        if(error&ER_DC_ERROR) dc_error ++;
        if(error&ER_AC_ERROR) ac_error ++;
        if(error&ER_MV_ERROR) mv_error ++;
    }
    av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n", dc_error, ac_error, mv_error);

    is_intra_likely= is_intra_more_likely(s);

    /* set unknown mb-type to most likely */
    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[i];
        error= s->error_status_table[mb_xy];
        if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
            continue;

        if(is_intra_likely)
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }

    // change inter to intra blocks if no reference frames are available
    if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
        for(i=0; i<s->mb_num; i++){
            const int mb_xy= s->mb_index2xy[i];
            if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for(mb_y=0; mb_y<s->mb_height; mb_y++){
        for(mb_x=0; mb_x<s->mb_width; mb_x++){
            const int mb_xy= mb_x + mb_y * s->mb_stride;
            const int mb_type= s->current_picture.f.mb_type[mb_xy];
            int dir = !s->last_picture.f.data[0];

            error= s->error_status_table[mb_xy];

            if(IS_INTRA(mb_type)) continue;        //intra
            if(error&ER_MV_ERROR) continue;        //inter with damaged MV
            if(!(error&ER_AC_ERROR)) continue;     //undamaged inter

            s->mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            s->mb_intra=0;
            s->mb_skipped=0;
            if(IS_8X8(mb_type)){
                int mb_index= mb_x*2 + mb_y*2*s->b8_stride;
                int j;
                s->mv_type = MV_TYPE_8X8;
                for(j=0; j<4; j++){
                    s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                    s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            }else{
                s->mv_type     = MV_TYPE_16X16;
                s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
                s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
            }

            s->dsp.clear_blocks(s->block[0]);

            s->mb_x= mb_x;
            s->mb_y= mb_y;
            decode_mb(s, 0/*FIXME h264 partitioned slices need this set*/);
        }
    }

    /* guess MVs */
    if(s->pict_type==AV_PICTURE_TYPE_B){
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                int xy= mb_x*2 + mb_y*2*s->b8_stride;
                const int mb_xy= mb_x + mb_y * s->mb_stride;
                const int mb_type= s->current_picture.f.mb_type[mb_xy];

                error= s->error_status_table[mb_xy];

                if(IS_INTRA(mb_type)) continue;
                if(!(error&ER_MV_ERROR)) continue;   //inter with undamaged MV
                if(!(error&ER_AC_ERROR)) continue;   //undamaged inter

                s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
                if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
                if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
                s->mb_intra=0;
                s->mv_type = MV_TYPE_16X16;
                s->mb_skipped=0;

                if(s->pp_time){
                    int time_pp= s->pp_time;
                    int time_pb= s->pb_time;

                    if (s->avctx->codec_id == CODEC_ID_H264) {
                        //FIXME
                    } else {
                        ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
                                                 mb_y, 0);
                    }
                    s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] *  time_pb            / time_pp;
                    s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] *  time_pb            / time_pp;
                    s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
                    s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                }else{
                    s->mv[0][0][0]= 0;
                    s->mv[0][0][1]= 0;
                    s->mv[1][0][0]= 0;
                    s->mv[1][0][1]= 0;
                }

                s->dsp.clear_blocks(s->block[0]);
                s->mb_x= mb_x;
                s->mb_y= mb_y;
                decode_mb(s, 0);
            }
        }
    }else
        guess_mv(s);

    /* the filters below are not XvMC compatible, skip them */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        goto ec_clean;
    /* fill DC for inter blocks */
    for(mb_y=0; mb_y<s->mb_height; mb_y++){
        for(mb_x=0; mb_x<s->mb_width; mb_x++){
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy= mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error= s->error_status_table[mb_xy];

            if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
//            if(error&ER_MV_ERROR) continue; //inter data damaged FIXME is this good?

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
            for(n=0; n<4; n++){
                dc=0;
                for(y=0; y<8; y++){
                    int x;
                    for(x=0; x<8; x++){
                        dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize];
                    }
                }
                dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3;
            }

            dcu=dcv=0;
            for(y=0; y<8; y++){
                int x;
                for(x=0; x<8; x++){
                    dcu+=dest_cb[x + y*(s->uvlinesize)];
                    dcv+=dest_cr[x + y*(s->uvlinesize)];
                }
            }
            s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3;
            s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3;
        }
    }
#if 1
    /* guess DC for damaged blocks */
    guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
    guess_dc(s, s->dc_val[1], s->mb_width  , s->mb_height  , s->mb_stride, 0);
    guess_dc(s, s->dc_val[2], s->mb_width  , s->mb_height  , s->mb_stride, 0);
#endif
    /* filter luma DC */
    filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride);

#if 1
    /* render DC only intra */
    for(mb_y=0; mb_y<s->mb_height; mb_y++){
        for(mb_x=0; mb_x<s->mb_width; mb_x++){
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy= mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error= s->error_status_table[mb_xy];

            if(IS_INTER(mb_type)) continue;
            if(!(error&ER_AC_ERROR)) continue;   //undamaged

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }
#endif

    if(s->avctx->error_concealment&FF_EC_DEBLOCK){
        /* filter horizontal block boundaries */
        h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
        h_block_filter(s, s->current_picture.f.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
        h_block_filter(s, s->current_picture.f.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);

        /* filter vertical block boundaries */
        v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
        v_block_filter(s, s->current_picture.f.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
        v_block_filter(s, s->current_picture.f.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
    }

ec_clean:
    /* clean a few tables */
    for(i=0; i<s->mb_num; i++){
        const int mb_xy= s->mb_index2xy[i];
        int error= s->error_status_table[mb_xy];

        if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(ER_DC_ERROR|ER_MV_ERROR|ER_AC_ERROR))){
            s->mbskip_table[mb_xy]=0;
        }
        s->mbintra_table[mb_xy]=1;
    }
}
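
/*
 * Typical call pattern from a decoder (illustrative sketch only; the exact
 * integration and the resync_mb_x/resync_mb_y bookkeeping depend on the
 * calling decoder):
 *
 *     ff_er_frame_start(s);
 *     // ...decode slices; after each slice report what was decoded:
 *     ff_er_add_slice(s, resync_mb_x, resync_mb_y, mb_x, mb_y, ER_MB_END);
 *     // ...or, when a decode error was hit inside the slice:
 *     ff_er_add_slice(s, resync_mb_x, resync_mb_y, mb_x, mb_y, ER_MB_ERROR);
 *     // once the whole frame has been processed:
 *     ff_er_frame_end(s);
 */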