/*
 * Error resilience / concealment
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Error resilience / concealment.
 */

#include <limits.h>

#include "avcodec.h"
#include "error_resilience.h"
#include "mpegvideo.h"
#include "rectangle.h"
#include "thread.h"

/**
 * @param stride  the number of MVs to get to the next row
 * @param mv_step the number of MVs per row or column in a macroblock
 */
static void set_mv_strides(ERContext *s, int *mv_step, int *stride)
{
    if (s->avctx->codec_id == AV_CODEC_ID_H264) {
        assert(s->quarter_sample);
        *mv_step = 4;
        *stride  = s->mb_width * 4;
    } else {
        *mv_step = 2;
        *stride  = s->b8_stride;
    }
}

/**
 * Replace the current MB with a flat dc-only version.
 */
static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int *linesize = s->cur_pic->f.linesize;
    int dc, dcu, dcv, y, i;
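
    /* The dc_val[] entries used here hold the block DC on an
     * 8x-the-sample-mean scale (see the (sum + 4) >> 3 fill in
     * ff_er_frame_end()), hence the clamp to 2040 (= 8 * 255) and the
     * division by 8 when writing pixels back. */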
    for (i = 0; i < 4; i++) {
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        if (dc < 0)
            dc = 0;
        else if (dc > 2040)
            dc = 2040;
        for (y = 0; y < 8; y++) {
            int x;
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
        }
    }
    dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
    dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
    if (dcu < 0)
        dcu = 0;
    else if (dcu > 2040)
        dcu = 2040;
    if (dcv < 0)
        dcv = 0;
    else if (dcv > 2040)
        dcv = 2040;
    for (y = 0; y < 8; y++) {
        int x;
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * linesize[1]] = dcu / 8;
            dest_cr[x + y * linesize[2]] = dcv / 8;
        }
    }
}

static void filter181(int16_t *data, int width, int height, int stride)
{
    int x, y;
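
    /* Separable (-1, 8, -1) smoothing of the DC plane; the taps sum to 6,
     * and (dc * 10923 + 32768) >> 16 is a fixed-point division by 6 with
     * rounding (10923 ~= 65536 / 6). Border samples are left untouched. */
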
    /* horizontal filter */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];
        for (x = 1; x < width - 1; x++) {
            int dc;
            dc = -prev_dc +
                 data[x     + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical filter */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];
        for (y = 1; y < height - 1; y++) {
            int dc;
            dc = -prev_dc +
                 data[x +  y      * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}

/**
 * guess the dc of blocks which do not have an undamaged dc
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void guess_dc(ERContext *s, int16_t *dc, int w,
                     int h, int stride, int is_luma)
{
    int b_x, b_y;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int color[4]    = { 1024, 1024, 1024, 1024 };
            int distance[4] = { 9999, 9999, 9999, 9999 };
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            error    = s->error_status_table[mb_index];

            if (IS_INTER(s->cur_pic->mb_type[mb_index]))
                continue; // inter
            if (!(error & ER_DC_ERROR))
                continue; // dc-ok

            /* right block */
            for (j = b_x + 1; j < w; j++) {
                int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[0]    = dc[j + b_y * stride];
                    distance[0] = j - b_x;
                    break;
                }
            }

            /* left block */
            for (j = b_x - 1; j >= 0; j--) {
                int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[1]    = dc[j + b_y * stride];
                    distance[1] = b_x - j;
                    break;
                }
            }

            /* bottom block */
            for (j = b_y + 1; j < h; j++) {
                int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[2]    = dc[b_x + j * stride];
                    distance[2] = j - b_y;
                    break;
                }
            }

            /* top block */
            for (j = b_y - 1; j >= 0; j--) {
                int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[3]    = dc[b_x + j * stride];
                    distance[3] = b_y - j;
                    break;
                }
            }
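
            /* Inverse-distance weighted average of the nearest usable DC in
             * each direction (weight = 2^28 / distance); directions where no
             * candidate was found keep the neutral color 1024 (= 8 * 128) at
             * distance 9999 and thus contribute almost nothing. */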
            weight_sum = 0;
            guess      = 0;
            for (j = 0; j < 4; j++) {
                int64_t weight = 256 * 256 * 256 * 16 / distance[j];
                guess      += weight * (int64_t) color[j];
                weight_sum += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
        }
    }
}

/**
 * simple horizontal deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void h_block_filter(ERContext *s, uint8_t *dst, int w,
                           int h, int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int y;
            int left_status  = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int left_intra   = IS_INTRA(s->cur_pic->mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int right_intra  = IS_INTRA(s->cur_pic->mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int left_damage  = left_status & ER_MB_ERROR;
            int right_damage = right_status & ER_MB_ERROR;
            int offset       = b_x * 8 + b_y * stride * 8;

            int16_t *left_mv  = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];

            if (!(left_damage || right_damage))
                continue; // both undamaged

            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue;
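
            /* d estimates the step across the block edge relative to the
             * gradients just inside each block; it is spread into the
             * damaged side(s) with weights 7/16, 5/16, 3/16, 1/16, fading
             * away from the edge. */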
            for (y = 0; y < 8; y++) {
                int a, b, c, d;

                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(left_damage && right_damage))
                    d = d * 16 / 9;

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset +  8 + y * stride] = cm[dst[offset +  8 + y * stride] - ((d * 7) >> 4)];
                    dst[offset +  9 + y * stride] = cm[dst[offset +  9 + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}

/**
 * simple vertical deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
                           int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int x;
            int top_status    = s->error_status_table[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride];
            int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
            int top_intra     = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
            int bottom_intra  = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
            int top_damage    = top_status & ER_MB_ERROR;
            int bottom_damage = bottom_status & ER_MB_ERROR;
            int offset        = b_x * 8 + b_y * stride * 8;

            int16_t *top_mv    = s->cur_pic->motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

            if (!(top_damage || bottom_damage))
                continue; // both undamaged

            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue;

            for (x = 0; x < 8; x++) {
                int a, b, c, d;

                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9;

                if (top_damage) {
                    dst[offset + x +  7 * stride] = cm[dst[offset + x +  7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x +  6 * stride] = cm[dst[offset + x +  6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x +  5 * stride] = cm[dst[offset + x +  5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x +  4 * stride] = cm[dst[offset + x +  4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}

static void guess_mv(ERContext *s)
{
    uint8_t *fixed = s->er_temp_buffer;
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1
    const int mb_stride = s->mb_stride;
    const int mb_width  = s->mb_width;
    const int mb_height = s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);

    num_avail = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int f = 0;
        int error = s->error_status_table[mb_xy];

        if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
            f = MV_FROZEN; // intra // FIXME check
        if (!(error & ER_MV_ERROR))
            f = MV_FROZEN; // inter with undamaged MV

        fixed[mb_xy] = f;
        if (f == MV_FROZEN)
            num_avail++;
    }
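
    /* With MV guessing disabled, or with too few trusted MVs to extrapolate
     * from, fall back to concealing every damaged inter MB with a zero MV,
     * using the previous frame when available and the next one otherwise. */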
    if ((!(s->avctx->error_concealment & FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;

                if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                    continue;

                s->mv[0][0][0] = 0;
                s->mv[0][0][1] = 0;
                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
                             mb_x, mb_y, 0, 0);
            }
        }
        return;
    }

    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        none_left = 1;
        changed   = 1;
        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            int mb_x, mb_y;
            int score_sum = 0;

            changed = 0;
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int ref[8]             = { 0 };
                    int pred_count         = 0;
                    int j;
                    int best_score         = 256 * 256 * 256 * 64;
                    int best_pred          = 0;
                    const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x, prev_y, prev_ref;
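
                    /* MBs are visited in a checkerboard pattern that
                     * alternates between passes, so MVs guessed in one pass
                     * can serve as neighbour predictors in the next. */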
                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue;
                    if (fixed[mb_xy] == MV_FROZEN)
                        continue;

                    assert(!IS_INTRA(s->cur_pic->mb_type[mb_xy]));
                    assert(s->last_pic && s->last_pic->f.data[0]);

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (j == 0)
                        continue;

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;

                    none_left = 0;

                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index - mot_step][1];
                        ref[pred_count] =
                            s->cur_pic->ref_index[0][4 * (mb_xy - 1)];
                        pred_count++;
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index + mot_step][1];
                        ref[pred_count] =
                            s->cur_pic->ref_index[0][4 * (mb_xy + 1)];
                        pred_count++;
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->cur_pic->ref_index[0][4 * (mb_xy - s->mb_stride)];
                        pred_count++;
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->cur_pic->ref_index[0][4 * (mb_xy + s->mb_stride)];
                        pred_count++;
                    }
                    if (pred_count == 0)
                        continue;

                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;

                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
                        pred_count += 2;
                    }

skip_mean_and_median:
                    /* zero MV */
                    pred_count++;

                    if (!fixed[mb_xy]) {
                        if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                            // FIXME
                        } else {
                            ff_thread_await_progress(&s->last_pic->tf,
                                                     mb_y, 0);
                        }
                        if (!s->last_pic->motion_val[0] ||
                            !s->last_pic->ref_index[0])
                            goto skip_last_mv;
                        prev_x   = s->last_pic->motion_val[0][mot_index][0];
                        prev_y   = s->last_pic->motion_val[0][mot_index][1];
                        prev_ref = s->last_pic->ref_index[0][4 * mb_xy];
                    } else {
                        prev_x   = s->cur_pic->motion_val[0][mot_index][0];
                        prev_y   = s->cur_pic->motion_val[0][mot_index][1];
                        prev_ref = s->cur_pic->ref_index[0][4 * mb_xy];
                    }

                    /* last MV */
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    pred_count++;

skip_last_mv:
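                    /* Try each candidate (the neighbour MVs, their mean and
                     * median, the zero MV and the previous MV for this
                     * position): decode the MB with it and score it by the
                     * sum of absolute differences along edges shared with
                     * already-fixed neighbours; the lowest score wins. */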
                    for (j = 0; j < pred_count; j++) {
                        int *linesize = s->cur_pic->f.linesize;
                        int score = 0;
                        uint8_t *src = s->cur_pic->f.data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                        s->cur_pic->motion_val[0][mot_index][0] =
                            s->mv[0][0][0] = mv_predictor[j][0];
                        s->cur_pic->motion_val[0][mot_index][1] =
                            s->mv[0][0][1] = mv_predictor[j][1];

                        // predictor intra or otherwise not available
                        if (ref[j] < 0)
                            continue;

                        s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD,
                                     MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);

                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] - 1] -
                                               src[k * linesize[0]]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] + 15] -
                                               src[k * linesize[0] + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - linesize[0]] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + linesize[0] * 15] -
                                               src[k + linesize[0] * 16]);
                        }

                        if (score <= best_score) { // <= will favor the last MV
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;
                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];
                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
                                 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
                }
            }
        }

        if (none_left)
            return;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (fixed[mb_xy])
                fixed[mb_xy] = MV_FROZEN;
        }
    }
}

static int is_intra_more_likely(ERContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    if (!s->last_pic || !s->last_pic->f.data[0])
        return 1; // no previous frame available -> use spatial prediction

    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        const int error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            undamaged_count++;
    }

    if (s->avctx->codec_id == AV_CODEC_ID_H264 && s->ref_count <= 0)
        return 1;

    if (undamaged_count < 5)
        return 0; // almost all MBs damaged -> use temporal prediction

    // prevent dsp.sad() check, that requires access to the image
    if (CONFIG_MPEG_XVMC_DECODER &&
        s->avctx->xvmc_acceleration &&
        s->cur_pic->f.pict_type == AV_PICTURE_TYPE_I)
        return 1;

    skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    is_intra_likely = 0;

    j = 0;
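
    /* Vote over a sample of undamaged MBs: in I-frames, an MB counts towards
     * intra when the co-located MB of the previous frame is a poor match for
     * its pixels (temporal SAD larger than a rough spatial-activity
     * estimate); in other frames, each MB simply votes with its decoded
     * mb_type. */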
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int error;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            error = s->error_status_table[mb_xy];
            if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
                continue; // skip damaged

            j++;
            // skip a few to speed things up
            if ((j % skip_amount) != 0)
                continue;

            if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_I) {
                int *linesize = s->cur_pic->f.linesize;
                uint8_t *mb_ptr      = s->cur_pic->f.data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];
                uint8_t *last_mb_ptr = s->last_pic->f.data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress(&s->last_pic->tf, mb_y, 0);
                }
                is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
                                                  linesize[0], 16);
                is_intra_likely -= s->dsp->sad[0](NULL, last_mb_ptr,
                                                  last_mb_ptr + linesize[0] * 16,
                                                  linesize[0], 16);
            } else {
                if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
                    is_intra_likely++;
                else
                    is_intra_likely--;
            }
        }
    }
    return is_intra_likely > 0;
}

void ff_er_frame_start(ERContext *s)
{
    if (!s->avctx->err_recognition)
        return;

    memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
           s->mb_stride * s->mb_height * sizeof(uint8_t));
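
    /* Three partitions (AC, DC, MV) per macroblock remain unaccounted for
     * until slices covering them are added via ff_er_add_slice(). */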
    s->error_count    = 3 * s->mb_num;
    s->error_occurred = 0;
}

/**
 * Add a slice.
 * @param endx   x component of the last macroblock, can be -1
 *               for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
 *               assumed that no earlier end or error of the same type occurred
 */
void ff_er_add_slice(ERContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i    = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy = s->mb_index2xy[start_i];
    const int end_xy   = s->mb_index2xy[end_i];
    int mask           = -1;

    if (s->avctx->hwaccel)
        return;

    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    if (!s->avctx->err_recognition)
        return;
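
    /* mask accumulates the status bits this slice resolves; they are cleared
     * for all MBs the slice covers, and the slice's own status is OR-ed back
     * into the end MB below. */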
    mask &= ~VP_START;
    if (status & (ER_AC_ERROR | ER_AC_END)) {
        mask           &= ~(ER_AC_ERROR | ER_AC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_DC_ERROR | ER_DC_END)) {
        mask           &= ~(ER_DC_ERROR | ER_DC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_MV_ERROR | ER_MV_END)) {
        mask           &= ~(ER_MV_ERROR | ER_MV_END);
        s->error_count -= end_i - start_i + 1;
    }

    if (status & ER_MB_ERROR) {
        s->error_occurred = 1;
        s->error_count    = INT_MAX;
    }

    if (mask == ~0x7F) {
        memset(&s->error_status_table[start_xy], 0,
               (end_xy - start_xy) * sizeof(uint8_t));
    } else {
        int i;
        for (i = start_xy; i < end_xy; i++)
            s->error_status_table[i] &= mask;
    }

    if (end_i == s->mb_num)
        s->error_count = INT_MAX;
    else {
        s->error_status_table[end_xy] &= mask;
        s->error_status_table[end_xy] |= status;
    }

    s->error_status_table[start_xy] |= VP_START;

    if (start_xy > 0 && s->avctx->thread_count <= 1 &&
        s->avctx->skip_top * s->mb_width < start_i) {
        int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];

        prev_status &= ~VP_START;
        if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
            s->error_count = INT_MAX;
    }
}

void ff_er_frame_end(ERContext *s)
{
    int *linesize = s->cur_pic->f.linesize;
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4] = { 100, 100, 100 };
    int threshold = 50;
    int is_intra_likely;
    int size = s->b8_stride * 2 * s->mb_height;

    /* We do not support ER of field pictures yet,
     * though it should not crash if enabled. */
    if (!s->avctx->err_recognition || s->error_count == 0 ||
        s->avctx->hwaccel ||
        !s->cur_pic || s->cur_pic->field_picture ||
        s->error_count == 3 * s->mb_width *
                          (s->avctx->skip_top + s->avctx->skip_bottom)) {
        return;
    }

    if (s->cur_pic->motion_val[0] == NULL) {
        av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");

        for (i = 0; i < 2; i++) {
            s->cur_pic->ref_index_buf[i]  = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
            s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
            if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i])
                break;
            s->cur_pic->ref_index[i]  = s->cur_pic->ref_index_buf[i]->data;
            s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
        }
        if (i < 2) {
            for (i = 0; i < 2; i++) {
                av_buffer_unref(&s->cur_pic->ref_index_buf[i]);
                av_buffer_unref(&s->cur_pic->motion_val_buf[i]);
                s->cur_pic->ref_index[i]  = NULL;
                s->cur_pic->motion_val[i] = NULL;
            }
            return;
        }
    }

    if (s->avctx->debug & FF_DEBUG_ER) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int status = s->error_status_table[mb_x + mb_y * s->mb_stride];

                av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* handle overlapping slices */
    for (error_type = 1; error_type <= 3; error_type++) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & (1 << error_type))
                end_ok = 1;
            if (error & (8 << error_type))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= 1 << error_type;

            if (error & VP_START)
                end_ok = 0;
        }
    }

    /* handle slices with partitions of different length */
    if (s->partitioned_frame) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & ER_AC_END)
                end_ok = 0;
            if ((error & ER_MV_END) ||
                (error & ER_DC_END) ||
                (error & ER_AC_ERROR))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_AC_ERROR;

            if (error & VP_START)
                end_ok = 0;
        }
    }

    /* handle missing slices */
    if (s->avctx->err_recognition & AV_EF_EXPLODE) {
        int end_ok = 1;

        // FIXME + 100 hack
        for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error1 = s->error_status_table[mb_xy];
            int error2 = s->error_status_table[s->mb_index2xy[i + 1]];

            if (error1 & VP_START)
                end_ok = 1;

            if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
                error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
                ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
                 (error1 & ER_MV_END))) {
                // end & uninit
                end_ok = 0;
            }

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_MB_ERROR;
        }
    }

    /* backward mark errors */
    distance = 9999999;
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (!s->mbskip_table[mb_xy]) // FIXME partition specific
                distance++;
            if (error & (1 << error_type))
                distance = 0;

            if (s->partitioned_frame) {
                if (distance < threshold_part[error_type - 1])
                    s->error_status_table[mb_xy] |= 1 << error_type;
            } else {
                if (distance < threshold)
                    s->error_status_table[mb_xy] |= 1 << error_type;
            }

            if (error & VP_START)
                distance = 9999999;
        }
    }

    /* forward mark errors */
    error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int old_error   = s->error_status_table[mb_xy];

        if (old_error & VP_START) {
            error = old_error & ER_MB_ERROR;
        } else {
            error |= old_error & ER_MB_ERROR;
            s->error_status_table[mb_xy] |= error;
        }
    }

    /* handle not partitioned case */
    if (!s->partitioned_frame) {
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            error = s->error_status_table[mb_xy];
            if (error & ER_MB_ERROR)
                error |= ER_MB_ERROR;
            s->error_status_table[mb_xy] = error;
        }
    }

    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (error & ER_DC_ERROR)
            dc_error++;
        if (error & ER_AC_ERROR)
            ac_error++;
        if (error & ER_MV_ERROR)
            mv_error++;
    }
    av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n",
           dc_error, ac_error, mv_error);

    is_intra_likely = is_intra_more_likely(s);

    /* set unknown mb-type to most likely */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            continue;

        if (is_intra_likely)
            s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
            s->cur_pic->mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }

    // change inter to intra blocks if no reference frames are available
    if (!(s->last_pic && s->last_pic->f.data[0]) &&
        !(s->next_pic && s->next_pic->f.data[0]))
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            if (!IS_INTRA(s->cur_pic->mb_type[mb_xy]))
                s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic->mb_type[mb_xy];
            const int dir     = !(s->last_pic && s->last_pic->f.data[0]);
            const int mv_dir  = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            int mv_type;

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type))
                continue; // intra
            if (error & ER_MV_ERROR)
                continue; // inter with damaged MV
            if (!(error & ER_AC_ERROR))
                continue; // undamaged inter

            if (IS_8X8(mb_type)) {
                int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
                int j;
                mv_type = MV_TYPE_8X8;
                for (j = 0; j < 4; j++) {
                    s->mv[0][j][0] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                    s->mv[0][j][1] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            } else {
                mv_type = MV_TYPE_16X16;
                s->mv[0][0][0] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
                s->mv[0][0][1] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
            }

            s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
                         mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
        }
    }

    /* guess MVs */
    if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int       xy      = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy   = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->cur_pic->mb_type[mb_xy];
                int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;

                error = s->error_status_table[mb_xy];

                if (IS_INTRA(mb_type))
                    continue;
                if (!(error & ER_MV_ERROR))
                    continue; // inter with undamaged MV
                if (!(error & ER_AC_ERROR))
                    continue; // undamaged inter

                if (!(s->last_pic && s->last_pic->f.data[0]))
                    mv_dir &= ~MV_DIR_FORWARD;
                if (!(s->next_pic && s->next_pic->f.data[0]))
                    mv_dir &= ~MV_DIR_BACKWARD;

                if (s->pp_time) {
                    int time_pp = s->pp_time;
                    int time_pb = s->pb_time;

                    ff_thread_await_progress(&s->next_pic->tf, mb_y, 0);

                    s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] *  time_pb            / time_pp;
                    s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] *  time_pb            / time_pp;
                    s->mv[1][0][0] = s->next_pic->motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
                    s->mv[1][0][1] = s->next_pic->motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                } else {
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    s->mv[1][0][0] = 0;
                    s->mv[1][0][1] = 0;
                }

                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
                             mb_x, mb_y, 0, 0);
            }
        }
    } else
        guess_mv(s);

    /* the filters below are not XvMC compatible, skip them */
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        goto ec_clean;

    /* fill DC for inter blocks */
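    /* Each 8x8 block's average is stored as 8x the sample mean
     * ((sum of 64 samples + 4) >> 3), the scale expected by guess_dc(),
     * filter181() and put_dc() below. */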
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic->mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type) && s->partitioned_frame)
                continue;
            // if (error & ER_MV_ERROR)
            //     continue; // inter data damaged FIXME is this good?

            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    int x;
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * linesize[0]];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            }

            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                int x;
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * linesize[1]];
                    dcv += dest_cr[x + y * linesize[2]];
                }
            }
            s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
            s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
        }
    }

    /* guess DC for damaged blocks */
    guess_dc(s, s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride, 1);
    guess_dc(s, s->dc_val[1], s->mb_width, s->mb_height, s->mb_stride, 0);
    guess_dc(s, s->dc_val[2], s->mb_width, s->mb_height, s->mb_stride, 0);

    /* filter luma DC */
    filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);

    /* render DC only intra */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic->mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTER(mb_type))
                continue;
            if (!(error & ER_AC_ERROR))
                continue; // undamaged

            dest_y  = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic->f.data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic->f.data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }

    if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
        /* filter horizontal block boundaries */
        h_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);
        h_block_filter(s, s->cur_pic->f.data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);
        h_block_filter(s, s->cur_pic->f.data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);

        /* filter vertical block boundaries */
        v_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);
        v_block_filter(s, s->cur_pic->f.data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);
        v_block_filter(s, s->cur_pic->f.data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);
    }

ec_clean:
    /* clean a few tables */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int error = s->error_status_table[mb_xy];

        if (s->cur_pic->f.pict_type != AV_PICTURE_TYPE_B &&
            (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
            s->mbskip_table[mb_xy] = 0;
        }
        s->mbintra_table[mb_xy] = 1;
    }

    s->cur_pic  = NULL;
    s->next_pic = NULL;
    s->last_pic = NULL;
}