  1. /*
  2. * Error resilience / concealment
  3. *
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of Libav.
  7. *
  8. * Libav is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * Libav is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with Libav; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * Error resilience / concealment.
  25. */
  26. #include <limits.h>
  27. #include "avcodec.h"
  28. #include "dsputil.h"
  29. #include "mpegvideo.h"
  30. #include "h264.h"
  31. #include "rectangle.h"
  32. #include "thread.h"
  33. /*
  34. * H.264 redefines mb_intra so it is not mistakenly used (it is uninitialized in H.264),
  35. * but error concealment must support both H.264 and H.263, so we must undo this
  36. */
  37. #undef mb_intra
  38. static void decode_mb(MpegEncContext *s, int ref){
  39. s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
  40. s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
  41. s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
  42. if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
  43. H264Context *h= (void*)s;
  44. h->mb_xy= s->mb_x + s->mb_y*s->mb_stride;
  45. memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
  46. assert(ref>=0);
  47. /* FIXME: It is possible albeit uncommon that slice references
  48. * differ between slices. We take the easy approach and ignore
  49. * it for now. If this turns out to have any relevance in
  50. * practice then correct remapping should be added. */
  51. if (ref >= h->ref_count[0])
  52. ref=0;
  53. fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
  54. fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
  55. fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
  56. assert(!FRAME_MBAFF);
  57. ff_h264_hl_decode_mb(h);
  58. }else{
  59. assert(ref==0);
  60. MPV_decode_mb(s, s->block);
  61. }
  62. }
  63. /**
  64. * @param stride the number of MVs to get to the next row
  65. * @param mv_step the number of MVs per row or column in a macroblock
  66. */
  67. static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride){
  68. if(s->codec_id == CODEC_ID_H264){
  69. H264Context *h= (void*)s;
  70. assert(s->quarter_sample);
  71. *mv_step= 4;
  72. *stride= h->b_stride;
  73. }else{
  74. *mv_step= 2;
  75. *stride= s->b8_stride;
  76. }
  77. }
  78. /**
  79. * Replace the current MB with a flat dc-only version.
  80. */
  81. static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
  82. {
  83. int dc, dcu, dcv, y, i;
  84. for(i=0; i<4; i++){
  85. dc= s->dc_val[0][mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride];
  86. if(dc<0) dc=0;
  87. else if(dc>2040) dc=2040;
  88. for(y=0; y<8; y++){
  89. int x;
  90. for(x=0; x<8; x++){
  91. dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
  92. }
  93. }
  94. }
  95. dcu = s->dc_val[1][mb_x + mb_y*s->mb_stride];
  96. dcv = s->dc_val[2][mb_x + mb_y*s->mb_stride];
  97. if (dcu<0 ) dcu=0;
  98. else if(dcu>2040) dcu=2040;
  99. if (dcv<0 ) dcv=0;
  100. else if(dcv>2040) dcv=2040;
  101. for(y=0; y<8; y++){
  102. int x;
  103. for(x=0; x<8; x++){
  104. dest_cb[x + y * s->uvlinesize] = dcu / 8;
  105. dest_cr[x + y * s->uvlinesize] = dcv / 8;
  106. }
  107. }
  108. }
  109. static void filter181(int16_t *data, int width, int height, int stride){
  110. int x,y;
  111. /* horizontal filter */
  112. for(y=1; y<height-1; y++){
  113. int prev_dc= data[0 + y*stride];
  114. for(x=1; x<width-1; x++){
  115. int dc;
  116. dc= - prev_dc
  117. + data[x + y*stride]*8
  118. - data[x + 1 + y*stride];
  119. dc= (dc*10923 + 32768)>>16;
  120. prev_dc= data[x + y*stride];
  121. data[x + y*stride]= dc;
  122. }
  123. }
  124. /* vertical filter */
  125. for(x=1; x<width-1; x++){
  126. int prev_dc= data[x];
  127. for(y=1; y<height-1; y++){
  128. int dc;
  129. dc= - prev_dc
  130. + data[x + y *stride]*8
  131. - data[x + (y+1)*stride];
  132. dc= (dc*10923 + 32768)>>16;
  133. prev_dc= data[x + y*stride];
  134. data[x + y*stride]= dc;
  135. }
  136. }
  137. }
  138. /**
  139. * guess the DC of blocks which do not have an undamaged DC
  140. * @param w width in 8 pixel blocks
  141. * @param h height in 8 pixel blocks
  142. */
  143. static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
  144. int b_x, b_y;
  145. for(b_y=0; b_y<h; b_y++){
  146. for(b_x=0; b_x<w; b_x++){
  147. int color[4]={1024,1024,1024,1024};
  148. int distance[4]={9999,9999,9999,9999};
  149. int mb_index, error, j;
  150. int64_t guess, weight_sum;
  151. mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
  152. error= s->error_status_table[mb_index];
  153. if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
  154. if(!(error&ER_DC_ERROR)) continue; //dc-ok
  155. /* right block */
  156. for(j=b_x+1; j<w; j++){
  157. int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
  158. int error_j= s->error_status_table[mb_index_j];
  159. int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
  160. if(intra_j==0 || !(error_j&ER_DC_ERROR)){
  161. color[0]= dc[j + b_y*stride];
  162. distance[0]= j-b_x;
  163. break;
  164. }
  165. }
  166. /* left block */
  167. for(j=b_x-1; j>=0; j--){
  168. int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
  169. int error_j= s->error_status_table[mb_index_j];
  170. int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
  171. if(intra_j==0 || !(error_j&ER_DC_ERROR)){
  172. color[1]= dc[j + b_y*stride];
  173. distance[1]= b_x-j;
  174. break;
  175. }
  176. }
  177. /* bottom block */
  178. for(j=b_y+1; j<h; j++){
  179. int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
  180. int error_j= s->error_status_table[mb_index_j];
  181. int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
  182. if(intra_j==0 || !(error_j&ER_DC_ERROR)){
  183. color[2]= dc[b_x + j*stride];
  184. distance[2]= j-b_y;
  185. break;
  186. }
  187. }
  188. /* top block */
  189. for(j=b_y-1; j>=0; j--){
  190. int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
  191. int error_j= s->error_status_table[mb_index_j];
  192. int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
  193. if(intra_j==0 || !(error_j&ER_DC_ERROR)){
  194. color[3]= dc[b_x + j*stride];
  195. distance[3]= b_y-j;
  196. break;
  197. }
  198. }
  199. weight_sum=0;
  200. guess=0;
  201. for(j=0; j<4; j++){
  202. int64_t weight= 256*256*256*16/distance[j];
  203. guess+= weight*(int64_t)color[j];
  204. weight_sum+= weight;
  205. }
  206. guess= (guess + weight_sum/2) / weight_sum;
  207. dc[b_x + b_y*stride]= guess;
  208. }
  209. }
  210. }
  211. /**
  212. * simple horizontal deblocking filter used for error resilience
  213. * @param w width in 8 pixel blocks
  214. * @param h height in 8 pixel blocks
  215. */
  216. static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
  217. int b_x, b_y, mvx_stride, mvy_stride;
  218. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  219. set_mv_strides(s, &mvx_stride, &mvy_stride);
  220. mvx_stride >>= is_luma;
  221. mvy_stride *= mvx_stride;
  222. for(b_y=0; b_y<h; b_y++){
  223. for(b_x=0; b_x<w-1; b_x++){
  224. int y;
  225. int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
  226. int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
  227. int left_intra = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
  228. int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
  229. int left_damage = left_status&ER_MB_ERROR;
  230. int right_damage= right_status&ER_MB_ERROR;
  231. int offset= b_x*8 + b_y*stride*8;
  232. int16_t *left_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
  233. int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
  234. if(!(left_damage||right_damage)) continue; // both undamaged
  235. if( (!left_intra) && (!right_intra)
  236. && FFABS(left_mv[0]-right_mv[0]) + FFABS(left_mv[1]+right_mv[1]) < 2) continue;
  237. for(y=0; y<8; y++){
  238. int a,b,c,d;
  239. a= dst[offset + 7 + y*stride] - dst[offset + 6 + y*stride];
  240. b= dst[offset + 8 + y*stride] - dst[offset + 7 + y*stride];
  241. c= dst[offset + 9 + y*stride] - dst[offset + 8 + y*stride];
  242. d= FFABS(b) - ((FFABS(a) + FFABS(c) + 1)>>1);
  243. d= FFMAX(d, 0);
  244. if(b<0) d= -d;
  245. if(d==0) continue;
  246. if(!(left_damage && right_damage))
  247. d= d*16/9;
  248. if(left_damage){
  249. dst[offset + 7 + y*stride] = cm[dst[offset + 7 + y*stride] + ((d*7)>>4)];
  250. dst[offset + 6 + y*stride] = cm[dst[offset + 6 + y*stride] + ((d*5)>>4)];
  251. dst[offset + 5 + y*stride] = cm[dst[offset + 5 + y*stride] + ((d*3)>>4)];
  252. dst[offset + 4 + y*stride] = cm[dst[offset + 4 + y*stride] + ((d*1)>>4)];
  253. }
  254. if(right_damage){
  255. dst[offset + 8 + y*stride] = cm[dst[offset + 8 + y*stride] - ((d*7)>>4)];
  256. dst[offset + 9 + y*stride] = cm[dst[offset + 9 + y*stride] - ((d*5)>>4)];
  257. dst[offset + 10+ y*stride] = cm[dst[offset +10 + y*stride] - ((d*3)>>4)];
  258. dst[offset + 11+ y*stride] = cm[dst[offset +11 + y*stride] - ((d*1)>>4)];
  259. }
  260. }
  261. }
  262. }
  263. }
  264. /**
  265. * simple vertical deblocking filter used for error resilience
  266. * @param w width in 8 pixel blocks
  267. * @param h height in 8 pixel blocks
  268. */
  269. static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
  270. int b_x, b_y, mvx_stride, mvy_stride;
  271. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  272. set_mv_strides(s, &mvx_stride, &mvy_stride);
  273. mvx_stride >>= is_luma;
  274. mvy_stride *= mvx_stride;
  275. for(b_y=0; b_y<h-1; b_y++){
  276. for(b_x=0; b_x<w; b_x++){
  277. int x;
  278. int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
  279. int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
  280. int top_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
  281. int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
  282. int top_damage = top_status&ER_MB_ERROR;
  283. int bottom_damage= bottom_status&ER_MB_ERROR;
  284. int offset= b_x*8 + b_y*stride*8;
  285. int16_t *top_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
  286. int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
  287. if(!(top_damage||bottom_damage)) continue; // both undamaged
  288. if( (!top_intra) && (!bottom_intra)
  289. && FFABS(top_mv[0]-bottom_mv[0]) + FFABS(top_mv[1]+bottom_mv[1]) < 2) continue;
  290. for(x=0; x<8; x++){
  291. int a,b,c,d;
  292. a= dst[offset + x + 7*stride] - dst[offset + x + 6*stride];
  293. b= dst[offset + x + 8*stride] - dst[offset + x + 7*stride];
  294. c= dst[offset + x + 9*stride] - dst[offset + x + 8*stride];
  295. d= FFABS(b) - ((FFABS(a) + FFABS(c)+1)>>1);
  296. d= FFMAX(d, 0);
  297. if(b<0) d= -d;
  298. if(d==0) continue;
  299. if(!(top_damage && bottom_damage))
  300. d= d*16/9;
  301. if(top_damage){
  302. dst[offset + x + 7*stride] = cm[dst[offset + x + 7*stride] + ((d*7)>>4)];
  303. dst[offset + x + 6*stride] = cm[dst[offset + x + 6*stride] + ((d*5)>>4)];
  304. dst[offset + x + 5*stride] = cm[dst[offset + x + 5*stride] + ((d*3)>>4)];
  305. dst[offset + x + 4*stride] = cm[dst[offset + x + 4*stride] + ((d*1)>>4)];
  306. }
  307. if(bottom_damage){
  308. dst[offset + x + 8*stride] = cm[dst[offset + x + 8*stride] - ((d*7)>>4)];
  309. dst[offset + x + 9*stride] = cm[dst[offset + x + 9*stride] - ((d*5)>>4)];
  310. dst[offset + x + 10*stride] = cm[dst[offset + x + 10*stride] - ((d*3)>>4)];
  311. dst[offset + x + 11*stride] = cm[dst[offset + x + 11*stride] - ((d*1)>>4)];
  312. }
  313. }
  314. }
  315. }
  316. }
  317. static void guess_mv(MpegEncContext *s){
  318. uint8_t fixed[s->mb_stride * s->mb_height];
  319. #define MV_FROZEN 3
  320. #define MV_CHANGED 2
  321. #define MV_UNCHANGED 1
  322. const int mb_stride = s->mb_stride;
  323. const int mb_width = s->mb_width;
  324. const int mb_height= s->mb_height;
  325. int i, depth, num_avail;
  326. int mb_x, mb_y, mot_step, mot_stride;
  327. set_mv_strides(s, &mot_step, &mot_stride);
  328. num_avail=0;
  329. for(i=0; i<s->mb_num; i++){
  330. const int mb_xy= s->mb_index2xy[ i ];
  331. int f=0;
  332. int error= s->error_status_table[mb_xy];
  333. if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
  334. if(!(error&ER_MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
  335. fixed[mb_xy]= f;
  336. if(f==MV_FROZEN)
  337. num_avail++;
  338. }
  339. if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
  340. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  341. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  342. const int mb_xy= mb_x + mb_y*s->mb_stride;
  343. if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
  344. if(!(s->error_status_table[mb_xy]&ER_MV_ERROR)) continue;
  345. s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
  346. s->mb_intra=0;
  347. s->mv_type = MV_TYPE_16X16;
  348. s->mb_skipped=0;
  349. s->dsp.clear_blocks(s->block[0]);
  350. s->mb_x= mb_x;
  351. s->mb_y= mb_y;
  352. s->mv[0][0][0]= 0;
  353. s->mv[0][0][1]= 0;
  354. decode_mb(s, 0);
  355. }
  356. }
  357. return;
  358. }
  359. for(depth=0;; depth++){
  360. int changed, pass, none_left;
  361. none_left=1;
  362. changed=1;
  363. for(pass=0; (changed || pass<2) && pass<10; pass++){
  364. int mb_x, mb_y;
  365. int score_sum=0;
  366. changed=0;
  367. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  368. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  369. const int mb_xy= mb_x + mb_y*s->mb_stride;
  370. int mv_predictor[8][2]={{0}};
  371. int ref[8]={0};
  372. int pred_count=0;
  373. int j;
  374. int best_score=256*256*256*64;
  375. int best_pred=0;
  376. const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
  377. int prev_x, prev_y, prev_ref;
  378. if((mb_x^mb_y^pass)&1) continue;
  379. if(fixed[mb_xy]==MV_FROZEN) continue;
  380. assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
  381. assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
  382. j=0;
  383. if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
  384. if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_FROZEN) j=1;
  385. if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_FROZEN) j=1;
  386. if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_FROZEN) j=1;
  387. if(j==0) continue;
  388. j=0;
  389. if(mb_x>0 && fixed[mb_xy-1 ]==MV_CHANGED) j=1;
  390. if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_CHANGED) j=1;
  391. if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_CHANGED) j=1;
  392. if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_CHANGED) j=1;
  393. if(j==0 && pass>1) continue;
  394. none_left=0;
  395. if(mb_x>0 && fixed[mb_xy-1]){
  396. mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
  397. mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
  398. ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
  399. pred_count++;
  400. }
  401. if(mb_x+1<mb_width && fixed[mb_xy+1]){
  402. mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
  403. mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
  404. ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
  405. pred_count++;
  406. }
  407. if(mb_y>0 && fixed[mb_xy-mb_stride]){
  408. mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
  409. mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
  410. ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
  411. pred_count++;
  412. }
  413. if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
  414. mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
  415. mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
  416. ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
  417. pred_count++;
  418. }
  419. if(pred_count==0) continue;
  420. if(pred_count>1){
  421. int sum_x=0, sum_y=0, sum_r=0;
  422. int max_x, max_y, min_x, min_y, max_r, min_r;
  423. for(j=0; j<pred_count; j++){
  424. sum_x+= mv_predictor[j][0];
  425. sum_y+= mv_predictor[j][1];
  426. sum_r+= ref[j];
  427. if(j && ref[j] != ref[j-1])
  428. goto skip_mean_and_median;
  429. }
  430. /* mean */
  431. mv_predictor[pred_count][0] = sum_x/j;
  432. mv_predictor[pred_count][1] = sum_y/j;
  433. ref [pred_count] = sum_r/j;
  434. /* median */
  435. if(pred_count>=3){
  436. min_y= min_x= min_r= 99999;
  437. max_y= max_x= max_r=-99999;
  438. }else{
  439. min_x=min_y=max_x=max_y=min_r=max_r=0;
  440. }
  441. for(j=0; j<pred_count; j++){
  442. max_x= FFMAX(max_x, mv_predictor[j][0]);
  443. max_y= FFMAX(max_y, mv_predictor[j][1]);
  444. max_r= FFMAX(max_r, ref[j]);
  445. min_x= FFMIN(min_x, mv_predictor[j][0]);
  446. min_y= FFMIN(min_y, mv_predictor[j][1]);
  447. min_r= FFMIN(min_r, ref[j]);
  448. }
  449. mv_predictor[pred_count+1][0] = sum_x - max_x - min_x;
  450. mv_predictor[pred_count+1][1] = sum_y - max_y - min_y;
  451. ref [pred_count+1] = sum_r - max_r - min_r;
  452. if(pred_count==4){
  453. mv_predictor[pred_count+1][0] /= 2;
  454. mv_predictor[pred_count+1][1] /= 2;
  455. ref [pred_count+1] /= 2;
  456. }
  457. pred_count+=2;
  458. }
  459. skip_mean_and_median:
  460. /* zero MV */
  461. pred_count++;
  462. if (!fixed[mb_xy]) {
  463. if (s->avctx->codec_id == CODEC_ID_H264) {
  464. // FIXME
  465. } else {
  466. ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
  467. mb_y, 0);
  468. }
  469. if (!s->last_picture.f.motion_val[0] ||
  470. !s->last_picture.f.ref_index[0])
  471. goto skip_last_mv;
  472. prev_x = s->last_picture.f.motion_val[0][mot_index][0];
  473. prev_y = s->last_picture.f.motion_val[0][mot_index][1];
  474. prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
  475. } else {
  476. prev_x = s->current_picture.f.motion_val[0][mot_index][0];
  477. prev_y = s->current_picture.f.motion_val[0][mot_index][1];
  478. prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
  479. }
  480. /* last MV */
  481. mv_predictor[pred_count][0]= prev_x;
  482. mv_predictor[pred_count][1]= prev_y;
  483. ref [pred_count] = prev_ref;
  484. pred_count++;
  485. skip_last_mv:
  486. s->mv_dir = MV_DIR_FORWARD;
  487. s->mb_intra=0;
  488. s->mv_type = MV_TYPE_16X16;
  489. s->mb_skipped=0;
  490. s->dsp.clear_blocks(s->block[0]);
  491. s->mb_x= mb_x;
  492. s->mb_y= mb_y;
  493. for(j=0; j<pred_count; j++){
  494. int score=0;
  495. uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
  496. s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
  497. s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
  498. if(ref[j]<0) //predictor intra or otherwise not available
  499. continue;
  500. decode_mb(s, ref[j]);
  501. if(mb_x>0 && fixed[mb_xy-1]){
  502. int k;
  503. for(k=0; k<16; k++)
  504. score += FFABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
  505. }
  506. if(mb_x+1<mb_width && fixed[mb_xy+1]){
  507. int k;
  508. for(k=0; k<16; k++)
  509. score += FFABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
  510. }
  511. if(mb_y>0 && fixed[mb_xy-mb_stride]){
  512. int k;
  513. for(k=0; k<16; k++)
  514. score += FFABS(src[k-s->linesize ]-src[k ]);
  515. }
  516. if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
  517. int k;
  518. for(k=0; k<16; k++)
  519. score += FFABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
  520. }
  521. if(score <= best_score){ // <= will favor the last MV
  522. best_score= score;
  523. best_pred= j;
  524. }
  525. }
  526. score_sum+= best_score;
  527. s->mv[0][0][0]= mv_predictor[best_pred][0];
  528. s->mv[0][0][1]= mv_predictor[best_pred][1];
  529. for(i=0; i<mot_step; i++)
  530. for(j=0; j<mot_step; j++){
  531. s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
  532. s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
  533. }
  534. decode_mb(s, ref[best_pred]);
  535. if(s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y){
  536. fixed[mb_xy]=MV_CHANGED;
  537. changed++;
  538. }else
  539. fixed[mb_xy]=MV_UNCHANGED;
  540. }
  541. }
  542. // printf(".%d/%d", changed, score_sum); fflush(stdout);
  543. }
  544. if(none_left)
  545. return;
  546. for(i=0; i<s->mb_num; i++){
  547. int mb_xy= s->mb_index2xy[i];
  548. if(fixed[mb_xy])
  549. fixed[mb_xy]=MV_FROZEN;
  550. }
  551. // printf(":"); fflush(stdout);
  552. }
  553. }
  554. static int is_intra_more_likely(MpegEncContext *s){
  555. int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
  556. if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
  557. undamaged_count=0;
  558. for(i=0; i<s->mb_num; i++){
  559. const int mb_xy= s->mb_index2xy[i];
  560. const int error= s->error_status_table[mb_xy];
  561. if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
  562. undamaged_count++;
  563. }
  564. if(s->codec_id == CODEC_ID_H264){
  565. H264Context *h= (void*)s;
  566. if (h->list_count <= 0 || h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
  567. return 1;
  568. }
  569. if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
  570. // prevent the dsp.sad() check, which requires access to the image
  571. if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
  572. return 1;
  573. skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
  574. is_intra_likely=0;
  575. j=0;
  576. for(mb_y= 0; mb_y<s->mb_height-1; mb_y++){
  577. for(mb_x= 0; mb_x<s->mb_width; mb_x++){
  578. int error;
  579. const int mb_xy= mb_x + mb_y*s->mb_stride;
  580. error= s->error_status_table[mb_xy];
  581. if((error&ER_DC_ERROR) && (error&ER_MV_ERROR))
  582. continue; //skip damaged
  583. j++;
  584. if((j%skip_amount) != 0) continue; //skip a few to speed things up
  585. if(s->pict_type==AV_PICTURE_TYPE_I){
  586. uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
  587. uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize;
  588. if (s->avctx->codec_id == CODEC_ID_H264) {
  589. // FIXME
  590. } else {
  591. ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
  592. mb_y, 0);
  593. }
  594. is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
  595. is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
  596. }else{
  597. if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
  598. is_intra_likely++;
  599. else
  600. is_intra_likely--;
  601. }
  602. }
  603. }
  604. //printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
  605. return is_intra_likely > 0;
  606. }
  607. void ff_er_frame_start(MpegEncContext *s){
  608. if(!s->err_recognition) return;
  609. memset(s->error_status_table, ER_MB_ERROR|VP_START|ER_MB_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
  610. s->error_count= 3*s->mb_num;
  611. s->error_occurred = 0;
  612. }
  613. /**
  614. * Add a slice.
  615. * @param endx x component of the last macroblock, can be -1 for the last of the previous line
  616. * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is assumed that no earlier end or
  617. * error of the same type occurred
  618. */
  619. void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
  620. const int start_i= av_clip(startx + starty * s->mb_width , 0, s->mb_num-1);
  621. const int end_i = av_clip(endx + endy * s->mb_width , 0, s->mb_num);
  622. const int start_xy= s->mb_index2xy[start_i];
  623. const int end_xy = s->mb_index2xy[end_i];
  624. int mask= -1;
  625. if(s->avctx->hwaccel)
  626. return;
  627. if(start_i > end_i || start_xy > end_xy){
  628. av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n");
  629. return;
  630. }
  631. if(!s->err_recognition) return;
  632. mask &= ~VP_START;
  633. if(status & (ER_AC_ERROR|ER_AC_END)){
  634. mask &= ~(ER_AC_ERROR|ER_AC_END);
  635. s->error_count -= end_i - start_i + 1;
  636. }
  637. if(status & (ER_DC_ERROR|ER_DC_END)){
  638. mask &= ~(ER_DC_ERROR|ER_DC_END);
  639. s->error_count -= end_i - start_i + 1;
  640. }
  641. if(status & (ER_MV_ERROR|ER_MV_END)){
  642. mask &= ~(ER_MV_ERROR|ER_MV_END);
  643. s->error_count -= end_i - start_i + 1;
  644. }
  645. if(status & ER_MB_ERROR) {
  646. s->error_occurred = 1;
  647. s->error_count= INT_MAX;
  648. }
  649. if(mask == ~0x7F){
  650. memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
  651. }else{
  652. int i;
  653. for(i=start_xy; i<end_xy; i++){
  654. s->error_status_table[ i ] &= mask;
  655. }
  656. }
  657. if(end_i == s->mb_num)
  658. s->error_count= INT_MAX;
  659. else{
  660. s->error_status_table[end_xy] &= mask;
  661. s->error_status_table[end_xy] |= status;
  662. }
  663. s->error_status_table[start_xy] |= VP_START;
  664. if(start_xy > 0 && s->avctx->thread_count <= 1 && s->avctx->skip_top*s->mb_width < start_i){
  665. int prev_status= s->error_status_table[ s->mb_index2xy[start_i - 1] ];
  666. prev_status &= ~ VP_START;
  667. if(prev_status != (ER_MV_END|ER_DC_END|ER_AC_END)) s->error_count= INT_MAX;
  668. }
  669. }
  670. void ff_er_frame_end(MpegEncContext *s){
  671. int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
  672. int distance;
  673. int threshold_part[4]= {100,100,100};
  674. int threshold= 50;
  675. int is_intra_likely;
  676. int size = s->b8_stride * 2 * s->mb_height;
  677. Picture *pic= s->current_picture_ptr;
  678. if(!s->err_recognition || s->error_count==0 || s->avctx->lowres ||
  679. s->avctx->hwaccel ||
  680. s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
  681. s->picture_structure != PICT_FRAME || // we do not support ER of field pictures yet, though it should not crash if enabled
  682. s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
  683. if (s->current_picture.f.motion_val[0] == NULL) {
  684. av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
  685. for(i=0; i<2; i++){
  686. pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
  687. pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
  688. pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
  689. }
  690. pic->f.motion_subsample_log2 = 3;
  691. s->current_picture= *s->current_picture_ptr;
  692. }
  693. if(s->avctx->debug&FF_DEBUG_ER){
  694. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  695. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  696. int status= s->error_status_table[mb_x + mb_y*s->mb_stride];
  697. av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
  698. }
  699. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  700. }
  701. }
  702. /* handle overlapping slices */
  703. for(error_type=1; error_type<=3; error_type++){
  704. int end_ok=0;
  705. for(i=s->mb_num-1; i>=0; i--){
  706. const int mb_xy= s->mb_index2xy[i];
  707. int error= s->error_status_table[mb_xy];
  708. if(error&(1<<error_type))
  709. end_ok=1;
  710. if(error&(8<<error_type))
  711. end_ok=1;
  712. if(!end_ok)
  713. s->error_status_table[mb_xy]|= 1<<error_type;
  714. if(error&VP_START)
  715. end_ok=0;
  716. }
  717. }
  718. /* handle slices with partitions of different length */
  719. if(s->partitioned_frame){
  720. int end_ok=0;
  721. for(i=s->mb_num-1; i>=0; i--){
  722. const int mb_xy= s->mb_index2xy[i];
  723. int error= s->error_status_table[mb_xy];
  724. if(error&ER_AC_END)
  725. end_ok=0;
  726. if((error&ER_MV_END) || (error&ER_DC_END) || (error&ER_AC_ERROR))
  727. end_ok=1;
  728. if(!end_ok)
  729. s->error_status_table[mb_xy]|= ER_AC_ERROR;
  730. if(error&VP_START)
  731. end_ok=0;
  732. }
  733. }
  734. /* handle missing slices */
  735. if(s->err_recognition&AV_EF_EXPLODE){
  736. int end_ok=1;
  737. for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
  738. const int mb_xy= s->mb_index2xy[i];
  739. int error1= s->error_status_table[mb_xy ];
  740. int error2= s->error_status_table[s->mb_index2xy[i+1]];
  741. if(error1&VP_START)
  742. end_ok=1;
  743. if( error2==(VP_START|ER_MB_ERROR|ER_MB_END)
  744. && error1!=(VP_START|ER_MB_ERROR|ER_MB_END)
  745. && ((error1&ER_AC_END) || (error1&ER_DC_END) || (error1&ER_MV_END))){ //end & uninit
  746. end_ok=0;
  747. }
  748. if(!end_ok)
  749. s->error_status_table[mb_xy]|= ER_MB_ERROR;
  750. }
  751. }
  752. /* backward mark errors */
  753. distance=9999999;
  754. for(error_type=1; error_type<=3; error_type++){
  755. for(i=s->mb_num-1; i>=0; i--){
  756. const int mb_xy= s->mb_index2xy[i];
  757. int error= s->error_status_table[mb_xy];
  758. if(!s->mbskip_table[mb_xy]) //FIXME partition specific
  759. distance++;
  760. if(error&(1<<error_type))
  761. distance= 0;
  762. if(s->partitioned_frame){
  763. if(distance < threshold_part[error_type-1])
  764. s->error_status_table[mb_xy]|= 1<<error_type;
  765. }else{
  766. if(distance < threshold)
  767. s->error_status_table[mb_xy]|= 1<<error_type;
  768. }
  769. if(error&VP_START)
  770. distance= 9999999;
  771. }
  772. }
  773. /* forward mark errors */
  774. error=0;
  775. for(i=0; i<s->mb_num; i++){
  776. const int mb_xy= s->mb_index2xy[i];
  777. int old_error= s->error_status_table[mb_xy];
  778. if(old_error&VP_START)
  779. error= old_error& ER_MB_ERROR;
  780. else{
  781. error|= old_error& ER_MB_ERROR;
  782. s->error_status_table[mb_xy]|= error;
  783. }
  784. }
  785. /* handle not partitioned case */
  786. if(!s->partitioned_frame){
  787. for(i=0; i<s->mb_num; i++){
  788. const int mb_xy= s->mb_index2xy[i];
  789. error= s->error_status_table[mb_xy];
  790. if(error&ER_MB_ERROR)
  791. error|= ER_MB_ERROR;
  792. s->error_status_table[mb_xy]= error;
  793. }
  794. }
  795. dc_error= ac_error= mv_error=0;
  796. for(i=0; i<s->mb_num; i++){
  797. const int mb_xy= s->mb_index2xy[i];
  798. error= s->error_status_table[mb_xy];
  799. if(error&ER_DC_ERROR) dc_error ++;
  800. if(error&ER_AC_ERROR) ac_error ++;
  801. if(error&ER_MV_ERROR) mv_error ++;
  802. }
  803. av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n", dc_error, ac_error, mv_error);
  804. is_intra_likely= is_intra_more_likely(s);
  805. /* set unknown mb-type to most likely */
  806. for(i=0; i<s->mb_num; i++){
  807. const int mb_xy= s->mb_index2xy[i];
  808. error= s->error_status_table[mb_xy];
  809. if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
  810. continue;
  811. if(is_intra_likely)
  812. s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
  813. else
  814. s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
  815. }
  816. // change inter to intra blocks if no reference frames are available
  817. if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
  818. for(i=0; i<s->mb_num; i++){
  819. const int mb_xy= s->mb_index2xy[i];
  820. if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
  821. s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
  822. }
  823. /* handle inter blocks with damaged AC */
  824. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  825. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  826. const int mb_xy= mb_x + mb_y * s->mb_stride;
  827. const int mb_type= s->current_picture.f.mb_type[mb_xy];
  828. int dir = !s->last_picture.f.data[0];
  829. error= s->error_status_table[mb_xy];
  830. if(IS_INTRA(mb_type)) continue; //intra
  831. if(error&ER_MV_ERROR) continue; //inter with damaged MV
  832. if(!(error&ER_AC_ERROR)) continue; //undamaged inter
  833. s->mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
  834. s->mb_intra=0;
  835. s->mb_skipped=0;
  836. if(IS_8X8(mb_type)){
  837. int mb_index= mb_x*2 + mb_y*2*s->b8_stride;
  838. int j;
  839. s->mv_type = MV_TYPE_8X8;
  840. for(j=0; j<4; j++){
  841. s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
  842. s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
  843. }
  844. }else{
  845. s->mv_type = MV_TYPE_16X16;
  846. s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
  847. s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
  848. }
  849. s->dsp.clear_blocks(s->block[0]);
  850. s->mb_x= mb_x;
  851. s->mb_y= mb_y;
  852. decode_mb(s, 0/*FIXME h264 partitioned slices need this set*/);
  853. }
  854. }
  855. /* guess MVs */
  856. if(s->pict_type==AV_PICTURE_TYPE_B){
  857. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  858. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  859. int xy= mb_x*2 + mb_y*2*s->b8_stride;
  860. const int mb_xy= mb_x + mb_y * s->mb_stride;
  861. const int mb_type= s->current_picture.f.mb_type[mb_xy];
  862. error= s->error_status_table[mb_xy];
  863. if(IS_INTRA(mb_type)) continue;
  864. if(!(error&ER_MV_ERROR)) continue; //inter with undamaged MV
  865. if(!(error&ER_AC_ERROR)) continue; //undamaged inter
  866. s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
  867. if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
  868. if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
  869. s->mb_intra=0;
  870. s->mv_type = MV_TYPE_16X16;
  871. s->mb_skipped=0;
  872. if(s->pp_time){
  873. int time_pp= s->pp_time;
  874. int time_pb= s->pb_time;
  875. if (s->avctx->codec_id == CODEC_ID_H264) {
  876. //FIXME
  877. } else {
  878. ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
  879. mb_y, 0);
  880. }
  881. s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
  882. s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
  883. s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
  884. s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
  885. }else{
  886. s->mv[0][0][0]= 0;
  887. s->mv[0][0][1]= 0;
  888. s->mv[1][0][0]= 0;
  889. s->mv[1][0][1]= 0;
  890. }
  891. s->dsp.clear_blocks(s->block[0]);
  892. s->mb_x= mb_x;
  893. s->mb_y= mb_y;
  894. decode_mb(s, 0);
  895. }
  896. }
  897. }else
  898. guess_mv(s);
  899. /* the filters below are not XvMC compatible, skip them */
  900. if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
  901. goto ec_clean;
  902. /* fill DC for inter blocks */
  903. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  904. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  905. int dc, dcu, dcv, y, n;
  906. int16_t *dc_ptr;
  907. uint8_t *dest_y, *dest_cb, *dest_cr;
  908. const int mb_xy= mb_x + mb_y * s->mb_stride;
  909. const int mb_type = s->current_picture.f.mb_type[mb_xy];
  910. error= s->error_status_table[mb_xy];
  911. if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
  912. // if(error&ER_MV_ERROR) continue; //inter data damaged FIXME is this good?
  913. dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
  914. dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
  915. dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
  916. dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
  917. for(n=0; n<4; n++){
  918. dc=0;
  919. for(y=0; y<8; y++){
  920. int x;
  921. for(x=0; x<8; x++){
  922. dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize];
  923. }
  924. }
  925. dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3;
  926. }
  927. dcu=dcv=0;
  928. for(y=0; y<8; y++){
  929. int x;
  930. for(x=0; x<8; x++){
  931. dcu += dest_cb[x + y * s->uvlinesize];
  932. dcv += dest_cr[x + y * s->uvlinesize];
  933. }
  934. }
  935. s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3;
  936. s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3;
  937. }
  938. }
  939. /* guess DC for damaged blocks */
  940. guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
  941. guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
  942. guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
  943. /* filter luma DC */
  944. filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride);
  945. /* render DC only intra */
  946. for(mb_y=0; mb_y<s->mb_height; mb_y++){
  947. for(mb_x=0; mb_x<s->mb_width; mb_x++){
  948. uint8_t *dest_y, *dest_cb, *dest_cr;
  949. const int mb_xy= mb_x + mb_y * s->mb_stride;
  950. const int mb_type = s->current_picture.f.mb_type[mb_xy];
  951. error= s->error_status_table[mb_xy];
  952. if(IS_INTER(mb_type)) continue;
  953. if(!(error&ER_AC_ERROR)) continue; //undamaged
  954. dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
  955. dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
  956. dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
  957. put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
  958. }
  959. }
  960. if(s->avctx->error_concealment&FF_EC_DEBLOCK){
  961. /* filter horizontal block boundaries */
  962. h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
  963. h_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
  964. h_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
  965. /* filter vertical block boundaries */
  966. v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
  967. v_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
  968. v_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
  969. }
  970. ec_clean:
  971. /* clean a few tables */
  972. for(i=0; i<s->mb_num; i++){
  973. const int mb_xy= s->mb_index2xy[i];
  974. int error= s->error_status_table[mb_xy];
  975. if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(ER_DC_ERROR|ER_MV_ERROR|ER_AC_ERROR))){
  976. s->mbskip_table[mb_xy]=0;
  977. }
  978. s->mbintra_table[mb_xy]=1;
  979. }
  980. }