/*
 * Error resilience / concealment
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Error resilience / concealment.
 */

#include <limits.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"
/*
 * H264 redefines mb_intra so it is not mistakenly used (it is uninitialized in h264),
 * but error concealment must support both h264 and h263, so we must undo this.
 */
#undef mb_intra
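
/**
 * Reconstruct the macroblock at (s->mb_x, s->mb_y) from the prediction state
 * already stored in the context (s->mv, s->mv_dir, s->mv_type, s->mb_intra),
 * dispatching either to the H.264 decoder path or to the generic MPEG path.
 * @param ref reference index to use in the H.264 case
 */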
static void decode_mb(MpegEncContext *s, int ref)
{
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    ff_init_block_index(s);
    ff_update_block_index(s);

    if (CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264) {
        H264Context *h = (void*)s;
        h->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
        memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
        assert(ref >= 0);
        /* FIXME: It is possible albeit uncommon that slice references
         * differ between slices. We take the easy approach and ignore
         * it for now. If this turns out to have any relevance in
         * practice then correct remapping should be added. */
        if (ref >= h->ref_count[0])
            ref = 0;
        fill_rectangle(&s->current_picture.f.ref_index[0][4 * h->mb_xy],
                       2, 2, 2, ref, 1);
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
        fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
                       pack16to32(s->mv[0][0][0], s->mv[0][0][1]), 4);
        assert(!FRAME_MBAFF);
        ff_h264_hl_decode_mb(h);
    } else {
        assert(ref == 0);
        MPV_decode_mb(s, s->block);
    }
}
/**
 * @param stride the number of MVs to get to the next row
 * @param mv_step the number of MVs per row or column in a macroblock
 */
static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride)
{
    if (s->codec_id == CODEC_ID_H264) {
        H264Context *h = (void*)s;
        assert(s->quarter_sample);
        *mv_step = 4;
        *stride  = h->b_stride;
    } else {
        *mv_step = 2;
        *stride  = s->b8_stride;
    }
}

/**
 * Replace the current MB with a flat dc-only version.
 */
static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int dc, dcu, dcv, y, i;
    for (i = 0; i < 4; i++) {
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        if (dc < 0)
            dc = 0;
        else if (dc > 2040)
            dc = 2040;
        for (y = 0; y < 8; y++) {
            int x;
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * s->linesize] = dc / 8;
        }
    }
    dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
    dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
    if (dcu < 0)
        dcu = 0;
    else if (dcu > 2040)
        dcu = 2040;
    if (dcv < 0)
        dcv = 0;
    else if (dcv > 2040)
        dcv = 2040;
    for (y = 0; y < 8; y++) {
        int x;
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * s->uvlinesize] = dcu / 8;
            dest_cr[x + y * s->uvlinesize] = dcv / 8;
        }
    }
}
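
/**
 * Smooth the stored DC values with a separable 3-tap (-1, 8, -1) filter
 * normalized by 6 (the multiply by 10923 and shift by 16 approximates a
 * division by 6), first along rows and then along columns, leaving the
 * border samples untouched.
 */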
static void filter181(int16_t *data, int width, int height, int stride)
{
    int x, y;

    /* horizontal filter */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];

        for (x = 1; x < width - 1; x++) {
            int dc;
            dc = -prev_dc +
                 data[x + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical filter */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (y = 1; y < height - 1; y++) {
            int dc;
            dc = -prev_dc +
                 data[x + y * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}
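
/*
 * guess_dc() below works in two passes: it first records, for every block,
 * the nearest usable DC value in each of the four directions (left, right,
 * up, down) together with its distance, and then replaces each damaged
 * intra DC with an average of the four candidates weighted by the inverse
 * of their distance.
 */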
/**
 * guess the dc of blocks which do not have an undamaged dc
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 */
static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
                     int h, int stride, int is_luma)
{
    int b_x, b_y;
    int16_t  (*col )[4] = av_malloc(stride*h*sizeof( int16_t)*4);
    uint16_t (*dist)[4] = av_malloc(stride*h*sizeof(uint16_t)*4);

    for(b_y=0; b_y<h; b_y++){
        int color= 1024;
        int distance= -1;
        for(b_x=0; b_x<w; b_x++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][1]= color;
            dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_x=w-1; b_x>=0; b_x--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][0]= color;
            dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
        }
    }
    for(b_x=0; b_x<w; b_x++){
        int color= 1024;
        int distance= -1;
        for(b_y=0; b_y<h; b_y++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][3]= color;
            dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_y=h-1; b_y>=0; b_y--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][2]= color;
            dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
        }
    }

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            error    = s->error_status_table[mb_index];

            if (IS_INTER(s->current_picture.f.mb_type[mb_index]))
                continue; // inter
            if (!(error & ER_DC_ERROR))
                continue; // dc-ok

            weight_sum = 0;
            guess      = 0;
            for (j = 0; j < 4; j++) {
                int64_t weight = 256 * 256 * 256 * 16 / dist[b_x + b_y*stride][j];
                guess += weight*(int64_t)col[b_x + b_y*stride][j];
                weight_sum += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
        }
    }

    av_freep(&col);
    av_freep(&dist);
}
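
/*
 * The two boundary filters below share the same idea: for each pair of
 * adjacent 8x8 blocks where at least one side is damaged, measure the step
 * b across the block edge, subtract half of the neighbouring gradients a
 * and c, and spread fractions of the remaining difference (7/16, 5/16,
 * 3/16, 1/16) over the four pixels on each damaged side of the edge.
 */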
/**
 * simple horizontal deblocking filter used for error resilience
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 */
static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w,
                           int h, int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int y;
            int left_status  = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int left_intra   = IS_INTRA(s->current_picture.f.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int right_intra  = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int left_damage  = left_status & ER_MB_ERROR;
            int right_damage = right_status & ER_MB_ERROR;
            int offset       = b_x * 8 + b_y * stride * 8;
            int16_t *left_mv  = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];

            if (!(left_damage || right_damage))
                continue; // both undamaged
            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue;

            for (y = 0; y < 8; y++) {
                int a, b, c, d;

                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(left_damage && right_damage))
                    d = d * 16 / 9;

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset + 8  + y * stride] = cm[dst[offset + 8  + y * stride] - ((d * 7) >> 4)];
                    dst[offset + 9  + y * stride] = cm[dst[offset + 9  + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
/**
 * simple vertical deblocking filter used for error resilience
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 */
static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h,
                           int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int x;
            int top_status    = s->error_status_table[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride];
            int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
            int top_intra     = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
            int bottom_intra  = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
            int top_damage    = top_status & ER_MB_ERROR;
            int bottom_damage = bottom_status & ER_MB_ERROR;
            int offset        = b_x * 8 + b_y * stride * 8;
            int16_t *top_mv    = s->current_picture.f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

            if (!(top_damage || bottom_damage))
                continue; // both undamaged
            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue;

            for (x = 0; x < 8; x++) {
                int a, b, c, d;

                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9;

                if (top_damage) {
                    dst[offset + x + 7 * stride] = cm[dst[offset + x + 7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x + 6 * stride] = cm[dst[offset + x + 6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x + 5 * stride] = cm[dst[offset + x + 5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x + 4 * stride] = cm[dst[offset + x + 4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
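
/*
 * guess_mv() conceals damaged motion vectors. Macroblocks that are intra or
 * whose MV survived are marked MV_FROZEN; the remaining ones are filled in
 * iteratively: for each damaged MB with at least one settled neighbour, a
 * set of candidate vectors is built (the four neighbours, their mean and
 * median, the zero vector and the previous vector), each candidate is
 * decoded and scored by the pixel discontinuity along the borders to
 * already-fixed neighbours, and the best-scoring candidate is kept.
 */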
static void guess_mv(MpegEncContext *s)
{
    uint8_t *fixed = av_malloc(s->mb_stride * s->mb_height);
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1
    const int mb_stride = s->mb_stride;
    const int mb_width  = s->mb_width;
    const int mb_height = s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);

    num_avail = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int f = 0;
        int error = s->error_status_table[mb_xy];

        if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
            f = MV_FROZEN; // intra // FIXME check
        if (!(error & ER_MV_ERROR))
            f = MV_FROZEN; // inter with undamaged MV

        fixed[mb_xy] = f;
        if (f == MV_FROZEN)
            num_avail++;
        else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){
            const int mb_y= mb_xy / s->mb_stride;
            const int mb_x= mb_xy % s->mb_stride;
            const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
            s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0];
            s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1];
            s->current_picture.f.ref_index[0][4*mb_xy]      = s->last_picture.f.ref_index[0][4*mb_xy];
        }
    }

    if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;

                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                    continue;

                s->mv_dir     = s->last_picture.f.data[0] ? MV_DIR_FORWARD
                                                          : MV_DIR_BACKWARD;
                s->mb_intra   = 0;
                s->mv_type    = MV_TYPE_16X16;
                s->mb_skipped = 0;

                s->dsp.clear_blocks(s->block[0]);

                s->mb_x        = mb_x;
                s->mb_y        = mb_y;
                s->mv[0][0][0] = 0;
                s->mv[0][0][1] = 0;
                decode_mb(s, 0);
            }
        }
        goto end;
    }

    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        none_left = 1;
        changed   = 1;
        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            int mb_x, mb_y;
            int score_sum = 0;

            changed = 0;
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int ref[8]             = { 0 };
                    int pred_count         = 0;
                    int j;
                    int best_score         = 256 * 256 * 256 * 64;
                    int best_pred          = 0;
                    const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x, prev_y, prev_ref;

                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue;
                    if (fixed[mb_xy] == MV_FROZEN)
                        continue;

                    assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
                    assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);

                    j = 0;
                    if (mb_x > 0 && fixed[mb_xy - 1] == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1] == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0 && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (j == 0)
                        continue;

                    j = 0;
                    if (mb_x > 0 && fixed[mb_xy - 1] == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1] == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0 && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;

                    none_left = 0;

                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index - mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy - 1)];
                        pred_count++;
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index + mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy + 1)];
                        pred_count++;
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy - s->mb_stride)];
                        pred_count++;
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy + s->mb_stride)];
                        pred_count++;
                    }
                    if (pred_count == 0)
                        continue;

                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;

                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
                        pred_count += 2;
                    }

skip_mean_and_median:
                    /* zero MV */
                    pred_count++;

                    if (!fixed[mb_xy] && 0) {
                        if (s->avctx->codec_id == CODEC_ID_H264) {
                            // FIXME
                        } else {
                            ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
                                                     mb_y, 0);
                        }
                        if (!s->last_picture.f.motion_val[0] ||
                            !s->last_picture.f.ref_index[0])
                            goto skip_last_mv;
                        prev_x   = s->last_picture.f.motion_val[0][mot_index][0];
                        prev_y   = s->last_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->last_picture.f.ref_index[0][4 * mb_xy];
                    } else {
                        prev_x   = s->current_picture.f.motion_val[0][mot_index][0];
                        prev_y   = s->current_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->current_picture.f.ref_index[0][4 * mb_xy];
                    }

                    /* last MV */
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    pred_count++;

skip_last_mv:
                    s->mv_dir     = MV_DIR_FORWARD;
                    s->mb_intra   = 0;
                    s->mv_type    = MV_TYPE_16X16;
                    s->mb_skipped = 0;

                    s->dsp.clear_blocks(s->block[0]);

                    s->mb_x = mb_x;
                    s->mb_y = mb_y;

                    for (j = 0; j < pred_count; j++) {
                        int score = 0;
                        uint8_t *src = s->current_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;

                        s->current_picture.f.motion_val[0][mot_index][0] =
                            s->mv[0][0][0] = mv_predictor[j][0];
                        s->current_picture.f.motion_val[0][mot_index][1] =
                            s->mv[0][0][1] = mv_predictor[j][1];

                        // predictor intra or otherwise not available
                        if (ref[j] < 0)
                            continue;

                        decode_mb(s, ref[j]);

                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * s->linesize - 1] -
                                               src[k * s->linesize]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * s->linesize + 15] -
                                               src[k * s->linesize + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - s->linesize] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + s->linesize * 15] -
                                               src[k + s->linesize * 16]);
                        }

                        if (score <= best_score) { // <= will favor the last MV
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;
                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];
                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    decode_mb(s, ref[best_pred]);

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
                }
            }
            // printf(".%d/%d", changed, score_sum); fflush(stdout);
        }
        if (none_left)
            goto end;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (fixed[mb_xy])
                fixed[mb_xy] = MV_FROZEN;
        }
        // printf(":"); fflush(stdout);
    }
end:
    av_free(fixed);
}
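
/*
 * Decide whether spatial (intra) or temporal (inter) concealment is more
 * promising for the damaged areas: on I-frames a subset of undamaged MBs is
 * compared against the previous frame with a SAD, otherwise the decision is
 * based on how many of the undamaged MBs were coded as intra.
 */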
static int is_intra_more_likely(MpegEncContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0])
        return 1; // no previous frame available -> use spatial prediction

    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        const int error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            undamaged_count++;
    }

    if (s->codec_id == CODEC_ID_H264) {
        H264Context *h = (void*) s;
        if (h->list_count <= 0 || h->ref_count[0] <= 0 ||
            !h->ref_list[0][0].f.data[0])
            return 1;
    }

    if (undamaged_count < 5)
        return 0; // almost all MBs damaged -> use temporal prediction

    // prevent dsp.sad() check, that requires access to the image
    if (CONFIG_MPEG_XVMC_DECODER &&
        s->avctx->xvmc_acceleration &&
        s->pict_type == AV_PICTURE_TYPE_I)
        return 1;

    skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    is_intra_likely = 0;

    j = 0;
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int error;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            error = s->error_status_table[mb_xy];
            if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
                continue; // skip damaged

            j++;
            // skip a few to speed things up
            if ((j % skip_amount) != 0)
                continue;

            if (s->pict_type == AV_PICTURE_TYPE_I) {
                uint8_t *mb_ptr      = s->current_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;
                uint8_t *last_mb_ptr = s->last_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;

                if (s->avctx->codec_id == CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
                                             mb_y, 0);
                }
                is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr,
                                                 s->linesize, 16);
                // FIXME need await_progress() here
                is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr,
                                                 last_mb_ptr + s->linesize * 16,
                                                 s->linesize, 16);
            } else {
                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    is_intra_likely++;
                else
                    is_intra_likely--;
            }
        }
    }
    // printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
    return is_intra_likely > 0;
}
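
/*
 * Mark every macroblock of the new frame as having AC, DC and MV errors;
 * ff_er_add_slice() later clears these bits for the areas each slice
 * actually covered, so whatever is still flagged at frame end gets concealed.
 */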
void ff_er_frame_start(MpegEncContext *s)
{
    if (!s->err_recognition)
        return;

    memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
           s->mb_stride * s->mb_height * sizeof(uint8_t));
    s->error_count    = 3 * s->mb_num;
    s->error_occurred = 0;
}
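
/*
 * ff_er_add_slice() translates the reported slice status into the per-MB
 * error_status_table: for every error/end type present in 'status' the
 * corresponding bits are cleared over the slice's macroblock range (and
 * error_count is decremented), the status itself is stored at the last MB,
 * and the first MB is flagged with VP_START.
 */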
/**
 * Add a slice.
 * @param endx   x component of the last macroblock, can be -1
 *               for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
 *               assumed that no earlier end or error of the same type occurred
 */
void ff_er_add_slice(MpegEncContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i    = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy = s->mb_index2xy[start_i];
    const int end_xy   = s->mb_index2xy[end_i];
    int mask           = -1;

    if (s->avctx->hwaccel)
        return;

    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    if (!s->err_recognition)
        return;

    mask &= ~VP_START;
    if (status & (ER_AC_ERROR | ER_AC_END)) {
        mask           &= ~(ER_AC_ERROR | ER_AC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_DC_ERROR | ER_DC_END)) {
        mask           &= ~(ER_DC_ERROR | ER_DC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_MV_ERROR | ER_MV_END)) {
        mask           &= ~(ER_MV_ERROR | ER_MV_END);
        s->error_count -= end_i - start_i + 1;
    }

    if (status & ER_MB_ERROR) {
        s->error_occurred = 1;
        s->error_count    = INT_MAX;
    }

    if (mask == ~0x7F) {
        memset(&s->error_status_table[start_xy], 0,
               (end_xy - start_xy) * sizeof(uint8_t));
    } else {
        int i;
        for (i = start_xy; i < end_xy; i++)
            s->error_status_table[i] &= mask;
    }

    if (end_i == s->mb_num)
        s->error_count = INT_MAX;
    else {
        s->error_status_table[end_xy] &= mask;
        s->error_status_table[end_xy] |= status;
    }

    s->error_status_table[start_xy] |= VP_START;

    if (start_xy > 0 && s->avctx->thread_count <= 1 &&
        s->avctx->skip_top * s->mb_width < start_i) {
        int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];

        prev_status &= ~ VP_START;
        if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
            s->error_count = INT_MAX;
    }
}
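
/*
 * ff_er_frame_end() runs the actual concealment once the frame is decoded:
 * it normalizes the per-MB error flags (overlapping slices, partitioned
 * frames, missing slices, backward/forward propagation), decides between
 * spatial and temporal concealment, reconstructs damaged inter MBs from
 * their surviving motion vectors, guesses missing motion vectors, estimates
 * and smooths DC values for damaged intra MBs, and optionally runs the
 * deblocking filters above before cleaning up the skip/intra tables.
 */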
void ff_er_frame_end(MpegEncContext *s)
{
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4] = { 100, 100, 100 };
    int threshold = 50;
    int is_intra_likely;
    int size = s->b8_stride * 2 * s->mb_height;
    Picture *pic = s->current_picture_ptr;

    /* We do not support ER of field pictures yet,
     * though it should not crash if enabled. */
    if (!s->err_recognition || s->error_count == 0 || s->avctx->lowres ||
        s->avctx->hwaccel ||
        s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
        s->picture_structure != PICT_FRAME ||
        s->error_count == 3 * s->mb_width *
                          (s->avctx->skip_top + s->avctx->skip_bottom)) {
        return;
    }

    if (s->current_picture.f.motion_val[0] == NULL) {
        av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");

        for (i = 0; i < 2; i++) {
            pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
            pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
            pic->f.motion_val[i]    = pic->motion_val_base[i] + 4;
        }
        pic->f.motion_subsample_log2 = 3;
        s->current_picture = *s->current_picture_ptr;
    }

    if (s->avctx->debug & FF_DEBUG_ER) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int status = s->error_status_table[mb_x + mb_y * s->mb_stride];

                av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

#if 1
    /* handle overlapping slices */
    for (error_type = 1; error_type <= 3; error_type++) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error = s->error_status_table[mb_xy];

            if (error & (1 << error_type))
                end_ok = 1;
            if (error & (8 << error_type))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= 1 << error_type;

            if (error & VP_START)
                end_ok = 0;
        }
    }
#endif
#if 1
    /* handle slices with partitions of different length */
    if (s->partitioned_frame) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error = s->error_status_table[mb_xy];

            if (error & ER_AC_END)
                end_ok = 0;
            if ((error & ER_MV_END) ||
                (error & ER_DC_END) ||
                (error & ER_AC_ERROR))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy]|= ER_AC_ERROR;

            if (error & VP_START)
                end_ok = 0;
        }
    }
#endif
    /* handle missing slices */
    if (s->err_recognition & AV_EF_EXPLODE) {
        int end_ok = 1;

        // FIXME + 100 hack
        for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error1 = s->error_status_table[mb_xy];
            int error2 = s->error_status_table[s->mb_index2xy[i + 1]];

            if (error1 & VP_START)
                end_ok = 1;

            if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
                error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
                ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
                 (error1 & ER_MV_END))) {
                // end & uninit
                end_ok = 0;
            }

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_MB_ERROR;
        }
    }

#if 1
    /* backward mark errors */
    distance = 9999999;
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error = s->error_status_table[mb_xy];

            if (!s->mbskip_table[mb_xy]) // FIXME partition specific
                distance++;
            if (error & (1 << error_type))
                distance = 0;

            if (s->partitioned_frame) {
                if (distance < threshold_part[error_type - 1])
                    s->error_status_table[mb_xy] |= 1 << error_type;
            } else {
                if (distance < threshold)
                    s->error_status_table[mb_xy] |= 1 << error_type;
            }

            if (error & VP_START)
                distance = 9999999;
        }
    }
#endif

    /* forward mark errors */
    error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int old_error = s->error_status_table[mb_xy];

        if (old_error & VP_START) {
            error = old_error & ER_MB_ERROR;
        } else {
            error |= old_error & ER_MB_ERROR;
            s->error_status_table[mb_xy] |= error;
        }
    }
#if 1
    /* handle not partitioned case */
    if (!s->partitioned_frame) {
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            error = s->error_status_table[mb_xy];
            if (error & ER_MB_ERROR)
                error |= ER_MB_ERROR;
            s->error_status_table[mb_xy] = error;
        }
    }
#endif

    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (error & ER_DC_ERROR)
            dc_error++;
        if (error & ER_AC_ERROR)
            ac_error++;
        if (error & ER_MV_ERROR)
            mv_error++;
    }
    av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n",
           dc_error, ac_error, mv_error);

    is_intra_likely = is_intra_more_likely(s);

    /* set unknown mb-type to most likely */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            continue;

        if (is_intra_likely)
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }

    // change inter to intra blocks if no reference frames are available
    if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];
            int dir = !s->last_picture.f.data[0];

            error = s->error_status_table[mb_xy];
            if (IS_INTRA(mb_type))
                continue; // intra
            if (error & ER_MV_ERROR)
                continue; // inter with damaged MV
            if (!(error & ER_AC_ERROR))
                continue; // undamaged inter

            s->mv_dir     = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            s->mb_intra   = 0;
            s->mb_skipped = 0;
            if (IS_8X8(mb_type)) {
                int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
                int j;
                s->mv_type = MV_TYPE_8X8;
                for (j = 0; j < 4; j++) {
                    s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                    s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            } else {
                s->mv_type     = MV_TYPE_16X16;
                s->mv[0][0][0] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
                s->mv[0][0][1] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
            }

            s->dsp.clear_blocks(s->block[0]);
            s->mb_x = mb_x;
            s->mb_y = mb_y;
            decode_mb(s, 0 /* FIXME h264 partitioned slices need this set */);
        }
    }

    /* guess MVs */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy   = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->current_picture.f.mb_type[mb_xy];

                error = s->error_status_table[mb_xy];
                if (IS_INTRA(mb_type))
                    continue;
                if (!(error & ER_MV_ERROR))
                    continue; // inter with undamaged MV
                if (!(error & ER_AC_ERROR))
                    continue; // undamaged inter

                s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                if (!s->last_picture.f.data[0])
                    s->mv_dir &= ~MV_DIR_FORWARD;
                if (!s->next_picture.f.data[0])
                    s->mv_dir &= ~MV_DIR_BACKWARD;
                s->mb_intra   = 0;
                s->mv_type    = MV_TYPE_16X16;
                s->mb_skipped = 0;

                if (s->pp_time) {
                    int time_pp = s->pp_time;
                    int time_pb = s->pb_time;

                    if (s->avctx->codec_id == CODEC_ID_H264) {
                        // FIXME
                    } else {
                        ff_thread_await_progress((AVFrame *) s->next_picture_ptr, mb_y, 0);
                    }
                    s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] *  time_pb            / time_pp;
                    s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] *  time_pb            / time_pp;
                    s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
                    s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                } else {
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    s->mv[1][0][0] = 0;
                    s->mv[1][0][1] = 0;
                }

                s->dsp.clear_blocks(s->block[0]);
                s->mb_x = mb_x;
                s->mb_y = mb_y;
                decode_mb(s, 0);
            }
        }
    } else
        guess_mv(s);

    /* the filters below are not XvMC compatible, skip them */
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        goto ec_clean;
    /* fill DC for inter blocks */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type) && s->partitioned_frame)
                continue;
            // if (error & ER_MV_ERROR)
            //     continue; // inter data damaged FIXME is this good?

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    int x;
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * s->linesize];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            }

            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                int x;
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * s->uvlinesize];
                    dcv += dest_cr[x + y * s->uvlinesize];
                }
            }
            s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
            s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
        }
    }
#if 1
    /* guess DC for damaged blocks */
    guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
    guess_dc(s, s->dc_val[1], s->mb_width  , s->mb_height  , s->mb_stride, 0);
    guess_dc(s, s->dc_val[2], s->mb_width  , s->mb_height  , s->mb_stride, 0);
#endif

    /* filter luma DC */
    filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);

#if 1
    /* render DC only intra */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTER(mb_type))
                continue;
            if (!(error & ER_AC_ERROR))
                continue; // undamaged

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }
#endif

    if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
        /* filter horizontal block boundaries */
        h_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
                       s->mb_height * 2, s->linesize, 1);
        h_block_filter(s, s->current_picture.f.data[1], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
        h_block_filter(s, s->current_picture.f.data[2], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);

        /* filter vertical block boundaries */
        v_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
                       s->mb_height * 2, s->linesize, 1);
        v_block_filter(s, s->current_picture.f.data[1], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
        v_block_filter(s, s->current_picture.f.data[2], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
    }

ec_clean:
    /* clean a few tables */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int error = s->error_status_table[mb_xy];

        if (s->pict_type != AV_PICTURE_TYPE_B &&
            (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
            s->mbskip_table[mb_xy] = 0;
        }
        s->mbintra_table[mb_xy] = 1;
    }
}