/*
 * Error resilience / concealment
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Error resilience / concealment.
 */

#include <limits.h>

#include "libavutil/internal.h"
#include "avcodec.h"
#include "error_resilience.h"
#include "me_cmp.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "rectangle.h"
#include "thread.h"

/**
 * @param stride the number of MVs to get to the next row
 * @param mv_step the number of MVs per row or column in a macroblock
 */
static void set_mv_strides(ERContext *s, ptrdiff_t *mv_step, ptrdiff_t *stride)
{
    if (s->avctx->codec_id == AV_CODEC_ID_H264) {
        assert(s->quarter_sample);
        *mv_step = 4;
        *stride  = s->mb_width * 4;
    } else {
        *mv_step = 2;
        *stride  = s->b8_stride;
    }
}

/**
 * Replace the current MB with a flat dc-only version.
 */
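/* Note: dc_val[] holds 8x the mean sample value of each 8x8 block (see the
 * DC fill pass in ff_er_frame_end()), which is why the values are clipped to
 * 2040 (= 8 * 255) and divided by 8 when written back to the picture. */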
static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int *linesize = s->cur_pic.f->linesize;
    int dc, dcu, dcv, y, i;
    for (i = 0; i < 4; i++) {
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        if (dc < 0)
            dc = 0;
        else if (dc > 2040)
            dc = 2040;
        for (y = 0; y < 8; y++) {
            int x;
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
        }
    }
    dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
    dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
    if (dcu < 0)
        dcu = 0;
    else if (dcu > 2040)
        dcu = 2040;
    if (dcv < 0)
        dcv = 0;
    else if (dcv > 2040)
        dcv = 2040;
    for (y = 0; y < 8; y++) {
        int x;
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * linesize[1]] = dcu / 8;
            dest_cr[x + y * linesize[2]] = dcv / 8;
        }
    }
}
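/* Smooth a DC plane with the normalized kernel [-1 8 -1] / 6 (10923 / 65536
 * is roughly 1/6 in the fixed-point multiply below), applied first along
 * rows and then along columns. */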
static void filter181(int16_t *data, int width, int height, ptrdiff_t stride)
{
    int x, y;

    /* horizontal filter */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];

        for (x = 1; x < width - 1; x++) {
            int dc;
            dc = -prev_dc +
                 data[x     + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical filter */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (y = 1; y < height - 1; y++) {
            int dc;

            dc = -prev_dc +
                 data[x + y       * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}

/**
 * guess the dc of blocks which do not have an undamaged dc
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void guess_dc(ERContext *s, int16_t *dc, int w,
                     int h, ptrdiff_t stride, int is_luma)
{
    int b_x, b_y;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int color[4]    = { 1024, 1024, 1024, 1024 };
            int distance[4] = { 9999, 9999, 9999, 9999 };
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            error    = s->error_status_table[mb_index];

            if (IS_INTER(s->cur_pic.mb_type[mb_index]))
                continue; // inter
            if (!(error & ER_DC_ERROR))
                continue; // dc-ok

            /* right block */
            for (j = b_x + 1; j < w; j++) {
                int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[0]    = dc[j + b_y * stride];
                    distance[0] = j - b_x;
                    break;
                }
            }

            /* left block */
            for (j = b_x - 1; j >= 0; j--) {
                int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[1]    = dc[j + b_y * stride];
                    distance[1] = b_x - j;
                    break;
                }
            }

            /* bottom block */
            for (j = b_y + 1; j < h; j++) {
                int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[2]    = dc[b_x + j * stride];
                    distance[2] = j - b_y;
                    break;
                }
            }

            /* top block */
            for (j = b_y - 1; j >= 0; j--) {
                int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[3]    = dc[b_x + j * stride];
                    distance[3] = b_y - j;
                    break;
                }
            }

            weight_sum = 0;
            guess      = 0;
            for (j = 0; j < 4; j++) {
                int64_t weight  = 256 * 256 * 256 * 16 / distance[j];
                guess          += weight * (int64_t) color[j];
                weight_sum     += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
        }
    }
}
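/* The two deblocking filters below share the same strength computation: the
 * correction d is derived from the gradients a, b, c across the block edge
 * and then distributed over four pixels on each side with weights 7/16, 5/16,
 * 3/16 and 1/16, but only on the damaged side(s) of the edge. */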
/**
 * simple horizontal deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void h_block_filter(ERContext *s, uint8_t *dst, int w,
                           int h, ptrdiff_t stride, int is_luma)
{
    int b_x, b_y;
    ptrdiff_t mvx_stride, mvy_stride;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int y;
            int left_status  = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int left_intra   = IS_INTRA(s->cur_pic.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int right_intra  = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int left_damage  = left_status & ER_MB_ERROR;
            int right_damage = right_status & ER_MB_ERROR;
            int offset       = b_x * 8 + b_y * stride * 8;
            int16_t *left_mv  = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
            if (!(left_damage || right_damage))
                continue; // both undamaged
            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue;

            for (y = 0; y < 8; y++) {
                int a, b, c, d;

                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(left_damage && right_damage))
                    d = d * 16 / 9;

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset + 8  + y * stride] = cm[dst[offset + 8  + y * stride] - ((d * 7) >> 4)];
                    dst[offset + 9  + y * stride] = cm[dst[offset + 9  + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}

/**
 * simple vertical deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
                           ptrdiff_t stride, int is_luma)
{
    int b_x, b_y;
    ptrdiff_t mvx_stride, mvy_stride;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int x;
            int top_status    = s->error_status_table[(b_x >> is_luma) +  (b_y      >> is_luma) * s->mb_stride];
            int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
            int top_intra     = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
            int bottom_intra  = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
            int top_damage    = top_status & ER_MB_ERROR;
            int bottom_damage = bottom_status & ER_MB_ERROR;
            int offset        = b_x * 8 + b_y * stride * 8;

            int16_t *top_mv    = s->cur_pic.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

            if (!(top_damage || bottom_damage))
                continue; // both undamaged

            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue;

            for (x = 0; x < 8; x++) {
                int a, b, c, d;

                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9;

                if (top_damage) {
                    dst[offset + x +  7 * stride] = cm[dst[offset + x +  7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x +  6 * stride] = cm[dst[offset + x +  6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x +  5 * stride] = cm[dst[offset + x +  5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x +  4 * stride] = cm[dst[offset + x +  4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
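/* Estimate motion vectors for macroblocks whose MVs were lost: MBs with an
 * intact MV (or intra MBs) are marked MV_FROZEN and act as anchors; damaged
 * MBs are then filled in iteratively from candidate predictors taken from
 * their neighbours, the mean/median of those, the co-located MV of the
 * previous frame and the zero MV, keeping whichever candidate minimizes a
 * simple boundary-matching score. */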
static void guess_mv(ERContext *s)
{
    uint8_t *fixed = s->er_temp_buffer;
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1
    const ptrdiff_t mb_stride = s->mb_stride;
    const int mb_width  = s->mb_width;
    const int mb_height = s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y;
    ptrdiff_t mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);

    num_avail = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int f = 0;
        int error = s->error_status_table[mb_xy];

        if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
            f = MV_FROZEN; // intra // FIXME check
        if (!(error & ER_MV_ERROR))
            f = MV_FROZEN; // inter with undamaged MV

        fixed[mb_xy] = f;
        if (f == MV_FROZEN)
            num_avail++;
    }
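    /* If MV guessing is disabled or too few MBs have usable MVs, fall back to
     * concealing every damaged inter MB with the zero MV. */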
    if ((!(s->avctx->error_concealment & FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;

                if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                    continue;

                s->mv[0][0][0] = 0;
                s->mv[0][0][1] = 0;
                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
                             mb_x, mb_y, 0, 0);
            }
        }
        return;
    }
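    /* Grow the set of known MVs outwards from the MV_FROZEN anchors. Each
     * pass visits the damaged MBs in a checkerboard pattern and re-estimates
     * those that touch an already fixed or recently changed neighbour. */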
    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        none_left = 1;
        changed   = 1;
        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            int mb_x, mb_y;
            int score_sum = 0;

            changed = 0;
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy        = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int ref[8]             = { 0 };
                    int pred_count         = 0;
                    int j;
                    int best_score         = 256 * 256 * 256 * 64;
                    int best_pred          = 0;
                    const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x = 0, prev_y = 0, prev_ref = 0;

                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue;
                    if (fixed[mb_xy] == MV_FROZEN)
                        continue;

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (j == 0)
                        continue;

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;

                    none_left = 0;

                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index - mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy - 1)];
                        pred_count++;
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index + mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy + 1)];
                        pred_count++;
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)];
                        pred_count++;
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)];
                        pred_count++;
                    }
                    if (pred_count == 0)
                        continue;

                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;

                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
                        pred_count += 2;
                    }

skip_mean_and_median:
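                    /* mv_predictor[] and ref[] are zero-initialized, so simply
                     * advancing pred_count here adds a (0,0) candidate with
                     * reference 0. */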
                    /* zero MV */
                    pred_count++;

                    if (!fixed[mb_xy]) {
                        if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                            // FIXME
                        } else {
                            ff_thread_await_progress(s->last_pic.tf,
                                                     mb_y, 0);
                        }
                        if (!s->last_pic.motion_val[0] ||
                            !s->last_pic.ref_index[0])
                            goto skip_last_mv;
                        prev_x   = s->last_pic.motion_val[0][mot_index][0];
                        prev_y   = s->last_pic.motion_val[0][mot_index][1];
                        prev_ref = s->last_pic.ref_index[0][4 * mb_xy];
                    } else {
                        prev_x   = s->cur_pic.motion_val[0][mot_index][0];
                        prev_y   = s->cur_pic.motion_val[0][mot_index][1];
                        prev_ref = s->cur_pic.ref_index[0][4 * mb_xy];
                    }

                    /* last MV */
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    pred_count++;

skip_last_mv:
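                    /* Try every candidate: decode the MB with it and score the
                     * result by the sum of absolute differences along the
                     * borders shared with already fixed neighbours; the lowest
                     * score wins. */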
                    for (j = 0; j < pred_count; j++) {
                        int *linesize = s->cur_pic.f->linesize;
                        int score = 0;
                        uint8_t *src = s->cur_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                        s->cur_pic.motion_val[0][mot_index][0] =
                            s->mv[0][0][0] = mv_predictor[j][0];
                        s->cur_pic.motion_val[0][mot_index][1] =
                            s->mv[0][0][1] = mv_predictor[j][1];

                        // predictor intra or otherwise not available
                        if (ref[j] < 0)
                            continue;

                        s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD,
                                     MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);

                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] - 1] -
                                               src[k * linesize[0]]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] + 15] -
                                               src[k * linesize[0] + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - linesize[0]] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + linesize[0] * 15] -
                                               src[k + linesize[0] * 16]);
                        }

                        if (score <= best_score) { // <= will favor the last MV
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;
                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];
                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
                                 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
                }
            }
        }

        if (none_left)
            return;
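        /* Promote every MB that was touched in this round to MV_FROZEN so it
         * can serve as an anchor in the next iteration. */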
        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (fixed[mb_xy])
                fixed[mb_xy] = MV_FROZEN;
        }
    }
}
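/* Decide whether damaged MBs are better concealed spatially (intra) or
 * temporally (inter): sample a subset of the undamaged MBs and, for intra
 * pictures, compare each sampled MB's difference to the co-located MB of the
 * previous frame against the previous frame's own vertical neighbour
 * difference; for other pictures, simply count intra vs. inter MB types.
 * A positive balance favours spatial (intra) concealment. */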
static int is_intra_more_likely(ERContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    if (!s->last_pic.f || !s->last_pic.f->data[0])
        return 1; // no previous frame available -> use spatial prediction

    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        const int error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            undamaged_count++;
    }

    if (s->avctx->codec_id == AV_CODEC_ID_H264 && s->ref_count <= 0)
        return 1;

    if (undamaged_count < 5)
        return 0; // almost all MBs damaged -> use temporal prediction

    skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    is_intra_likely = 0;

    j = 0;
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int error;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            error = s->error_status_table[mb_xy];
            if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
                continue; // skip damaged

            j++;
            // skip a few to speed things up
            if ((j % skip_amount) != 0)
                continue;

            if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) {
                int *linesize = s->cur_pic.f->linesize;
                uint8_t *mb_ptr      = s->cur_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];
                uint8_t *last_mb_ptr = s->last_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
                }
                is_intra_likely += s->mecc.sad[0](NULL, last_mb_ptr, mb_ptr,
                                                  linesize[0], 16);
                is_intra_likely -= s->mecc.sad[0](NULL, last_mb_ptr,
                                                  last_mb_ptr + linesize[0] * 16,
                                                  linesize[0], 16);
            } else {
                if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                    is_intra_likely++;
                else
                    is_intra_likely--;
            }
        }
    }
    return is_intra_likely > 0;
}
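/* Called at the start of each frame: mark every MB as fully errored; decoded
 * slices subsequently clear the relevant bits via ff_er_add_slice(). */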
void ff_er_frame_start(ERContext *s)
{
    if (!s->avctx->error_concealment)
        return;

    if (!s->mecc_inited) {
        ff_me_cmp_init(&s->mecc, s->avctx);
        s->mecc_inited = 1;
    }

    memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
           s->mb_stride * s->mb_height * sizeof(uint8_t));
    s->error_count    = 3 * s->mb_num;
    s->error_occurred = 0;
}

/**
 * Add a slice.
 * @param endx   x component of the last macroblock, can be -1
 *               for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
 *               assumed that no earlier end or error of the same type occurred
 */
void ff_er_add_slice(ERContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i    = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy = s->mb_index2xy[start_i];
    const int end_xy   = s->mb_index2xy[end_i];
    int mask           = -1;

    if (s->avctx->hwaccel)
        return;

    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    if (!s->avctx->error_concealment)
        return;
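    /* Build a mask that clears, for the MBs covered by this slice, the status
     * bits of each partition type reported here (as ended or errored);
     * error_count tracks how many such partitions are still outstanding. */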
    mask &= ~VP_START;
    if (status & (ER_AC_ERROR | ER_AC_END)) {
        mask           &= ~(ER_AC_ERROR | ER_AC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_DC_ERROR | ER_DC_END)) {
        mask           &= ~(ER_DC_ERROR | ER_DC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_MV_ERROR | ER_MV_END)) {
        mask           &= ~(ER_MV_ERROR | ER_MV_END);
        s->error_count -= end_i - start_i + 1;
    }

    if (status & ER_MB_ERROR) {
        s->error_occurred = 1;
        s->error_count    = INT_MAX;
    }

    if (mask == ~0x7F) {
        memset(&s->error_status_table[start_xy], 0,
               (end_xy - start_xy) * sizeof(uint8_t));
    } else {
        int i;
        for (i = start_xy; i < end_xy; i++)
            s->error_status_table[i] &= mask;
    }

    if (end_i == s->mb_num)
        s->error_count = INT_MAX;
    else {
        s->error_status_table[end_xy] &= mask;
        s->error_status_table[end_xy] |= status;
    }

    s->error_status_table[start_xy] |= VP_START;

    if (start_xy > 0 && s->avctx->thread_count <= 1 &&
        s->avctx->skip_top * s->mb_width < start_i) {
        int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];

        prev_status &= ~VP_START;
        if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
            s->error_count = INT_MAX;
    }
}
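/* Main concealment entry point, run once the whole frame has been decoded:
 * consolidate the per-MB error flags, pick intra or inter concealment for the
 * damaged MBs, reconstruct them (guessed MVs for inter, guessed and filtered
 * DC for intra) and optionally deblock the patched block boundaries. */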
void ff_er_frame_end(ERContext *s)
{
    int *linesize = s->cur_pic.f->linesize;
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4] = { 100, 100, 100 };
    int threshold = 50;
    int is_intra_likely;

    /* We do not support ER of field pictures yet,
     * though it should not crash if enabled. */
    if (!s->avctx->error_concealment || s->error_count == 0 ||
        s->avctx->hwaccel ||
        !s->cur_pic.f ||
        s->cur_pic.field_picture ||
        s->error_count == 3 * s->mb_width *
                          (s->avctx->skip_top + s->avctx->skip_bottom)) {
        return;
    }

    if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "MVs not available, ER not possible.\n");
        return;
    }

    if (s->avctx->debug & FF_DEBUG_ER) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int status = s->error_status_table[mb_x + mb_y * s->mb_stride];

                av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* handle overlapping slices */
    for (error_type = 1; error_type <= 3; error_type++) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error = s->error_status_table[mb_xy];

            if (error & (1 << error_type))
                end_ok = 1;
            if (error & (8 << error_type))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= 1 << error_type;

            if (error & VP_START)
                end_ok = 0;
        }
    }

    /* handle slices with partitions of different length */
    if (s->partitioned_frame) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error = s->error_status_table[mb_xy];

            if (error & ER_AC_END)
                end_ok = 0;
            if ((error & ER_MV_END) ||
                (error & ER_DC_END) ||
                (error & ER_AC_ERROR))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_AC_ERROR;

            if (error & VP_START)
                end_ok = 0;
        }
    }

    /* handle missing slices */
    if (s->avctx->err_recognition & AV_EF_EXPLODE) {
        int end_ok = 1;

        // FIXME + 100 hack
        for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error1 = s->error_status_table[mb_xy];
            int error2 = s->error_status_table[s->mb_index2xy[i + 1]];

            if (error1 & VP_START)
                end_ok = 1;

            if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
                error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
                ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
                 (error1 & ER_MV_END))) {
                // end & uninit
                end_ok = 0;
            }

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_MB_ERROR;
        }
    }

    /* backward mark errors */
    distance = 9999999;
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error = s->error_status_table[mb_xy];

            if (s->mbskip_table && !s->mbskip_table[mb_xy]) // FIXME partition specific
                distance++;
            if (error & (1 << error_type))
                distance = 0;

            if (s->partitioned_frame) {
                if (distance < threshold_part[error_type - 1])
                    s->error_status_table[mb_xy] |= 1 << error_type;
            } else {
                if (distance < threshold)
                    s->error_status_table[mb_xy] |= 1 << error_type;
            }

            if (error & VP_START)
                distance = 9999999;
        }
    }

    /* forward mark errors */
    error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int old_error = s->error_status_table[mb_xy];

        if (old_error & VP_START) {
            error = old_error & ER_MB_ERROR;
        } else {
            error |= old_error & ER_MB_ERROR;
            s->error_status_table[mb_xy] |= error;
        }
    }

    /* handle not partitioned case */
    if (!s->partitioned_frame) {
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            error = s->error_status_table[mb_xy];
            if (error & ER_MB_ERROR)
                error |= ER_MB_ERROR;
            s->error_status_table[mb_xy] = error;
        }
    }

    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (error & ER_DC_ERROR)
            dc_error++;
        if (error & ER_AC_ERROR)
            ac_error++;
        if (error & ER_MV_ERROR)
            mv_error++;
    }
    av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n",
           dc_error, ac_error, mv_error);

    is_intra_likely = is_intra_more_likely(s);

    /* set unknown mb-type to most likely */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            continue;

        if (is_intra_likely)
            s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
            s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }

    // change inter to intra blocks if no reference frames are available
    if (!(s->last_pic.f && s->last_pic.f->data[0]) &&
        !(s->next_pic.f && s->next_pic.f->data[0]))
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            if (!IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic.mb_type[mb_xy];
            const int dir     = !(s->last_pic.f && s->last_pic.f->data[0]);
            const int mv_dir  = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            int mv_type;

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type))
                continue; // intra
            if (error & ER_MV_ERROR)
                continue; // inter with damaged MV
            if (!(error & ER_AC_ERROR))
                continue; // undamaged inter

            if (IS_8X8(mb_type)) {
                int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
                int j;
                mv_type = MV_TYPE_8X8;
                for (j = 0; j < 4; j++) {
                    s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                    s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            } else {
                mv_type = MV_TYPE_16X16;
                s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
                s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
            }

            s->decode_mb(s->opaque, 0 /* FIXME H.264 partitioned slices need this set */,
                         mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
        }
    }

    /* guess MVs */
    if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy   = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->cur_pic.mb_type[mb_xy];
                int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;

                error = s->error_status_table[mb_xy];

                if (IS_INTRA(mb_type))
                    continue;
                if (!(error & ER_MV_ERROR))
                    continue; // inter with undamaged MV
                if (!(error & ER_AC_ERROR))
                    continue; // undamaged inter

                if (!(s->last_pic.f && s->last_pic.f->data[0]))
                    mv_dir &= ~MV_DIR_FORWARD;
                if (!(s->next_pic.f && s->next_pic.f->data[0]))
                    mv_dir &= ~MV_DIR_BACKWARD;
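                /* Derive the forward/backward MVs from the co-located MV of
                 * the next reference picture, scaled by the B-frame's temporal
                 * distances (pb_time / pp_time), similar to MPEG-4 direct
                 * mode; without timing info fall back to the zero MV. */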
                if (s->pp_time) {
                    int time_pp = s->pp_time;
                    int time_pb = s->pb_time;

                    ff_thread_await_progress(s->next_pic.tf, mb_y, 0);

                    s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] *  time_pb            / time_pp;
                    s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] *  time_pb            / time_pp;
                    s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
                    s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                } else {
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    s->mv[1][0][0] = 0;
                    s->mv[1][0][1] = 0;
                }

                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
                             mb_x, mb_y, 0, 0);
            }
        }
    } else
        guess_mv(s);

    /* fill DC for inter blocks */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type) && s->partitioned_frame)
                continue;
            // if (error & ER_MV_ERROR)
            //     continue; // inter data damaged FIXME is this good?

            dest_y  = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic.f->data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic.f->data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    int x;
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * linesize[0]];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            }

            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                int x;
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * linesize[1]];
                    dcv += dest_cr[x + y * linesize[2]];
                }
            }
            s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
            s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
        }
    }

    /* guess DC for damaged blocks */
    guess_dc(s, s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride, 1);
    guess_dc(s, s->dc_val[1], s->mb_width,     s->mb_height,     s->mb_stride, 0);
    guess_dc(s, s->dc_val[2], s->mb_width,     s->mb_height,     s->mb_stride, 0);

    /* filter luma DC */
    filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);

    /* render DC only intra */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTER(mb_type))
                continue;
            if (!(error & ER_AC_ERROR))
                continue; // undamaged

            dest_y  = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic.f->data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic.f->data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }

    if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
        /* filter horizontal block boundaries */
        h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);
        h_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);
        h_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);

        /* filter vertical block boundaries */
        v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);
        v_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);
        v_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);
    }

    /* clean a few tables */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int error = s->error_status_table[mb_xy];

        if (s->mbskip_table && s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B &&
            (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
            s->mbskip_table[mb_xy] = 0;
        }
        if (s->mbintra_table)
            s->mbintra_table[mb_xy] = 1;
    }

    memset(&s->cur_pic,  0, sizeof(ERPicture));
    memset(&s->last_pic, 0, sizeof(ERPicture));
    memset(&s->next_pic, 0, sizeof(ERPicture));
}