/*
 * Error resilience / concealment
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Error resilience / concealment.
 */

#include <limits.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"
/*
 * H.264 redefines mb_intra so it is not mistakenly used (it is uninitialized
 * in H.264), but error concealment must support both H.264 and H.263, so we
 * must undo the redefinition here.
 */
#undef mb_intra
static void decode_mb(MpegEncContext *s, int ref)
{
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    if (CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264) {
        H264Context *h = (void*)s;
        h->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
        memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
        assert(ref >= 0);
        /* FIXME: It is possible albeit uncommon that slice references
         * differ between slices. We take the easy approach and ignore
         * it for now. If this turns out to have any relevance in
         * practice then correct remapping should be added. */
        if (ref >= h->ref_count[0])
            ref = 0;
        fill_rectangle(&s->current_picture.f.ref_index[0][4 * h->mb_xy],
                       2, 2, 2, ref, 1);
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
        fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
                       pack16to32(s->mv[0][0][0], s->mv[0][0][1]), 4);
        assert(!FRAME_MBAFF);
        ff_h264_hl_decode_mb(h);
    } else {
        assert(ref == 0);
        MPV_decode_mb(s, s->block);
    }
}
/**
 * @param stride the number of MVs to get to the next row
 * @param mv_step the number of MVs per row or column in a macroblock
 */
static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride)
{
    if (s->codec_id == CODEC_ID_H264) {
        H264Context *h = (void*)s;
        assert(s->quarter_sample);
        *mv_step = 4;
        *stride  = h->b_stride;
    } else {
        *mv_step = 2;
        *stride  = s->b8_stride;
    }
}
/**
 * Replace the current MB with a flat dc-only version.
 */
static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int dc, dcu, dcv, y, i;

    for (i = 0; i < 4; i++) {
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        if (dc < 0)
            dc = 0;
        else if (dc > 2040)
            dc = 2040;
        for (y = 0; y < 8; y++) {
            int x;
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * s->linesize] = dc / 8;
        }
    }
    dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
    dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
    if (dcu < 0)
        dcu = 0;
    else if (dcu > 2040)
        dcu = 2040;
    if (dcv < 0)
        dcv = 0;
    else if (dcv > 2040)
        dcv = 2040;
    for (y = 0; y < 8; y++) {
        int x;
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * s->uvlinesize] = dcu / 8;
            dest_cr[x + y * s->uvlinesize] = dcv / 8;
        }
    }
}
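/* filter181() below applies a separable [-1 8 -1] low-pass to the DC plane:
 * 10923/65536 is approximately 1/6, so each sample becomes roughly
 * (8 * cur - left - right) / 6, first along rows and then along columns.
 * This smooths the guessed DC values so concealed regions blend together. */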
static void filter181(int16_t *data, int width, int height, int stride)
{
    int x, y;

    /* horizontal filter */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];

        for (x = 1; x < width - 1; x++) {
            int dc;
            dc = -prev_dc +
                 data[x     + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical filter */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (y = 1; y < height - 1; y++) {
            int dc;
            dc = -prev_dc +
                 data[x +  y      * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}
/**
 * guess the dc of blocks which do not have an undamaged dc
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
                     int h, int stride, int is_luma)
{
    int b_x, b_y;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int color[4]    = { 1024, 1024, 1024, 1024 };
            int distance[4] = { 9999, 9999, 9999, 9999 };
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            error    = s->error_status_table[mb_index];

            if (IS_INTER(s->current_picture.f.mb_type[mb_index]))
                continue; // inter
            if (!(error & ER_DC_ERROR))
                continue; // dc-ok

            /* right block */
            for (j = b_x + 1; j < w; j++) {
                int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[0]    = dc[j + b_y * stride];
                    distance[0] = j - b_x;
                    break;
                }
            }

            /* left block */
            for (j = b_x - 1; j >= 0; j--) {
                int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[1]    = dc[j + b_y * stride];
                    distance[1] = b_x - j;
                    break;
                }
            }

            /* bottom block */
            for (j = b_y + 1; j < h; j++) {
                int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[2]    = dc[b_x + j * stride];
                    distance[2] = j - b_y;
                    break;
                }
            }

            /* top block */
            for (j = b_y - 1; j >= 0; j--) {
                int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
                int error_j    = s->error_status_table[mb_index_j];
                int intra_j    = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
                    color[3]    = dc[b_x + j * stride];
                    distance[3] = b_y - j;
                    break;
                }
            }
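            /* Combine the four nearest usable DC values (right, left, below,
             * above) into a weighted average, weighting each by 1/distance,
             * so closer blocks dominate the guess. */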
            weight_sum = 0;
            guess      = 0;
            for (j = 0; j < 4; j++) {
                int64_t weight  = 256 * 256 * 256 * 16 / distance[j];
                guess          += weight * (int64_t) color[j];
                weight_sum     += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
        }
    }
}
/**
 * simple horizontal deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w,
                           int h, int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int y;
            int left_status   = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int right_status  = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int left_intra    = IS_INTRA(s->current_picture.f.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int right_intra   = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int left_damage   = left_status & ER_MB_ERROR;
            int right_damage  = right_status & ER_MB_ERROR;
            int offset        = b_x * 8 + b_y * stride * 8;
            int16_t *left_mv  = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];

            if (!(left_damage || right_damage))
                continue; // both undamaged
            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue;
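            /* For each of the 8 rows along this vertical block edge, compare
             * the step across the boundary (b) with the local gradients on
             * either side (a, c); only correct the part that exceeds the
             * local activity, fading the correction over 4 pixels into each
             * damaged block. */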
            for (y = 0; y < 8; y++) {
                int a, b, c, d;

                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(left_damage && right_damage))
                    d = d * 16 / 9;

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset + 8  + y * stride] = cm[dst[offset +  8 + y * stride] - ((d * 7) >> 4)];
                    dst[offset + 9  + y * stride] = cm[dst[offset +  9 + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
/**
 * simple vertical deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h,
                           int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int x;
            int top_status     = s->error_status_table[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride];
            int bottom_status  = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
            int top_intra      = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
            int bottom_intra   = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
            int top_damage     = top_status & ER_MB_ERROR;
            int bottom_damage  = bottom_status & ER_MB_ERROR;
            int offset         = b_x * 8 + b_y * stride * 8;
            int16_t *top_mv    = s->current_picture.f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

            if (!(top_damage || bottom_damage))
                continue; // both undamaged
            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue;
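            /* Same boundary-strength test as in h_block_filter(), applied to
             * the 8 columns along the horizontal edge between the two blocks. */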
            for (x = 0; x < 8; x++) {
                int a, b, c, d;

                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9;

                if (top_damage) {
                    dst[offset + x + 7 * stride] = cm[dst[offset + x + 7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x + 6 * stride] = cm[dst[offset + x + 6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x + 5 * stride] = cm[dst[offset + x + 5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x + 4 * stride] = cm[dst[offset + x + 4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
static void guess_mv(MpegEncContext *s)
{
    uint8_t fixed[s->mb_stride * s->mb_height];
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1
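    /* Per-macroblock state for the iterative guesser below:
     * MV_FROZEN    - MV considered reliable (initially intra or undamaged MBs,
     *                later also MBs already concealed in a previous round),
     * MV_CHANGED   - a new guess was written during the current pass,
     * MV_UNCHANGED - the MB was revisited but the best guess matched the
     *                previous value. */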
    const int mb_stride = s->mb_stride;
    const int mb_width  = s->mb_width;
    const int mb_height = s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);

    num_avail = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int f = 0;
        int error = s->error_status_table[mb_xy];

        if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
            f = MV_FROZEN; // intra // FIXME check
        if (!(error & ER_MV_ERROR))
            f = MV_FROZEN; // inter with undamaged MV

        fixed[mb_xy] = f;
        if (f == MV_FROZEN)
            num_avail++;
    }

    if ((!(s->avctx->error_concealment & FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;

                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                    continue;

                s->mv_dir     = s->last_picture.f.data[0] ? MV_DIR_FORWARD
                                                          : MV_DIR_BACKWARD;
                s->mb_intra   = 0;
                s->mv_type    = MV_TYPE_16X16;
                s->mb_skipped = 0;

                s->dsp.clear_blocks(s->block[0]);

                s->mb_x        = mb_x;
                s->mb_y        = mb_y;
                s->mv[0][0][0] = 0;
                s->mv[0][0][1] = 0;
                decode_mb(s, 0);
            }
        }
        return;
    }

    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        none_left = 1;
        changed   = 1;
        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            int mb_x, mb_y;
            int score_sum = 0;

            changed = 0;
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int ref[8]             = { 0 };
                    int pred_count         = 0;
                    int j;
                    int best_score         = 256 * 256 * 256 * 64;
                    int best_pred          = 0;
                    const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x, prev_y, prev_ref;
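                    /* Visit MBs in a checkerboard pattern whose parity
                     * alternates with each pass, so guesses can propagate
                     * from both frozen and freshly updated neighbours. */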
                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue;

                    if (fixed[mb_xy] == MV_FROZEN)
                        continue;
                    assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
                    assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (j == 0)
                        continue;

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;

                    none_left = 0;

                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index - mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy - 1)];
                        pred_count++;
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index + mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy + 1)];
                        pred_count++;
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy - s->mb_stride)];
                        pred_count++;
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy + s->mb_stride)];
                        pred_count++;
                    }
                    if (pred_count == 0)
                        continue;
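                    /* Besides the raw neighbour MVs, also try their mean and
                     * a median-like middle value as candidates, provided all
                     * neighbours reference the same picture. */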
                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;

                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
                        pred_count += 2;
                    }

skip_mean_and_median:
                    /* zero MV */
                    pred_count++;

                    if (!fixed[mb_xy]) {
                        if (s->avctx->codec_id == CODEC_ID_H264) {
                            // FIXME
                        } else {
                            ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
                                                     mb_y, 0);
                        }
                        if (!s->last_picture.f.motion_val[0] ||
                            !s->last_picture.f.ref_index[0])
                            goto skip_last_mv;
                        prev_x   = s->last_picture.f.motion_val[0][mot_index][0];
                        prev_y   = s->last_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->last_picture.f.ref_index[0][4 * mb_xy];
                    } else {
                        prev_x   = s->current_picture.f.motion_val[0][mot_index][0];
                        prev_y   = s->current_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->current_picture.f.ref_index[0][4 * mb_xy];
                    }

                    /* last MV */
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    pred_count++;

skip_last_mv:
                    s->mv_dir     = MV_DIR_FORWARD;
                    s->mb_intra   = 0;
                    s->mv_type    = MV_TYPE_16X16;
                    s->mb_skipped = 0;

                    s->dsp.clear_blocks(s->block[0]);

                    s->mb_x = mb_x;
                    s->mb_y = mb_y;
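                    /* Try each candidate MV: decode the MB with it and score
                     * the result by summing absolute pixel differences along
                     * the borders shared with already-fixed neighbours; the
                     * candidate giving the smoothest borders wins. */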
                    for (j = 0; j < pred_count; j++) {
                        int score = 0;
                        uint8_t *src = s->current_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;

                        s->current_picture.f.motion_val[0][mot_index][0] =
                            s->mv[0][0][0] = mv_predictor[j][0];
                        s->current_picture.f.motion_val[0][mot_index][1] =
                            s->mv[0][0][1] = mv_predictor[j][1];

                        // predictor intra or otherwise not available
                        if (ref[j] < 0)
                            continue;

                        decode_mb(s, ref[j]);

                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * s->linesize - 1] -
                                               src[k * s->linesize]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * s->linesize + 15] -
                                               src[k * s->linesize + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - s->linesize] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + s->linesize * 15] -
                                               src[k + s->linesize * 16]);
                        }

                        if (score <= best_score) { // <= will favor the last MV
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;

                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];

                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    decode_mb(s, ref[best_pred]);

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
                }
            }

            // printf(".%d/%d", changed, score_sum); fflush(stdout);
        }

        if (none_left)
            return;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (fixed[mb_xy])
                fixed[mb_xy] = MV_FROZEN;
        }
        // printf(":"); fflush(stdout);
    }
}
static int is_intra_more_likely(MpegEncContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0])
        return 1; // no previous frame available -> use spatial prediction

    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        const int error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            undamaged_count++;
    }

    if (s->codec_id == CODEC_ID_H264) {
        H264Context *h = (void*) s;
        if (h->list_count <= 0 || h->ref_count[0] <= 0 ||
            !h->ref_list[0][0].f.data[0])
            return 1;
    }

    if (undamaged_count < 5)
        return 0; // almost all MBs damaged -> use temporal prediction

    // skip the dsp.sad() check below, as it requires access to the image data
    if (CONFIG_MPEG_XVMC_DECODER    &&
        s->avctx->xvmc_acceleration &&
        s->pict_type == AV_PICTURE_TYPE_I)
        return 1;

    skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    is_intra_likely = 0;

    j = 0;
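    /* Vote over a sample of undamaged MBs: for I-pictures, compare the
     * temporal difference against the last picture with the last picture's
     * own spatial (vertical-neighbour) difference; otherwise simply count
     * whether intra or inter MBs dominate among the undamaged ones. */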
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int error;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            error = s->error_status_table[mb_xy];
            if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
                continue; // skip damaged

            j++;
            // skip a few to speed things up
            if ((j % skip_amount) != 0)
                continue;

            if (s->pict_type == AV_PICTURE_TYPE_I) {
                uint8_t *mb_ptr      = s->current_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;
                uint8_t *last_mb_ptr = s->last_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;

                if (s->avctx->codec_id == CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
                                             mb_y, 0);
                }
                is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr,
                                                 s->linesize, 16);
                is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr,
                                                 last_mb_ptr + s->linesize * 16,
                                                 s->linesize, 16);
            } else {
                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    is_intra_likely++;
                else
                    is_intra_likely--;
            }
        }
    }
    // printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
    return is_intra_likely > 0;
}
void ff_er_frame_start(MpegEncContext *s)
{
    if (!s->err_recognition)
        return;

    memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
           s->mb_stride * s->mb_height * sizeof(uint8_t));
    s->error_count    = 3 * s->mb_num;
    s->error_occurred = 0;
}
/**
 * Add a slice.
 * @param endx   x component of the last macroblock, can be -1
 *               for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
 *               assumed that no earlier end or error of the same type occurred
 */
void ff_er_add_slice(MpegEncContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i    = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy = s->mb_index2xy[start_i];
    const int end_xy   = s->mb_index2xy[end_i];
    int mask           = -1;

    if (s->avctx->hwaccel)
        return;

    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    if (!s->err_recognition)
        return;
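    /* For every partition type this slice reports as ended or errored, clear
     * the corresponding pending-error bits over [start, end) and decrement
     * the outstanding error count; bits that remain set in "mask" are left
     * untouched in the status table below. */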
    mask &= ~VP_START;
    if (status & (ER_AC_ERROR | ER_AC_END)) {
        mask           &= ~(ER_AC_ERROR | ER_AC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_DC_ERROR | ER_DC_END)) {
        mask           &= ~(ER_DC_ERROR | ER_DC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_MV_ERROR | ER_MV_END)) {
        mask           &= ~(ER_MV_ERROR | ER_MV_END);
        s->error_count -= end_i - start_i + 1;
    }

    if (status & ER_MB_ERROR) {
        s->error_occurred = 1;
        s->error_count    = INT_MAX;
    }

    if (mask == ~0x7F) {
        memset(&s->error_status_table[start_xy], 0,
               (end_xy - start_xy) * sizeof(uint8_t));
    } else {
        int i;
        for (i = start_xy; i < end_xy; i++)
            s->error_status_table[i] &= mask;
    }

    if (end_i == s->mb_num)
        s->error_count = INT_MAX;
    else {
        s->error_status_table[end_xy] &= mask;
        s->error_status_table[end_xy] |= status;
    }

    s->error_status_table[start_xy] |= VP_START;

    if (start_xy > 0 && s->avctx->thread_count <= 1 &&
        s->avctx->skip_top * s->mb_width < start_i) {
        int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];

        prev_status &= ~ VP_START;
        if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
            s->error_count = INT_MAX;
    }
}
void ff_er_frame_end(MpegEncContext *s)
{
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4] = { 100, 100, 100 };
    int threshold         = 50;
    int is_intra_likely;
    int size = s->b8_stride * 2 * s->mb_height;
    Picture *pic = s->current_picture_ptr;

    /* We do not support ER of field pictures yet,
     * though it should not crash if enabled. */
    if (!s->err_recognition || s->error_count == 0 || s->avctx->lowres ||
        s->avctx->hwaccel                                              ||
        s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU        ||
        s->picture_structure != PICT_FRAME                             ||
        s->error_count == 3 * s->mb_width *
                          (s->avctx->skip_top + s->avctx->skip_bottom)) {
        return;
    }

    if (s->current_picture.f.motion_val[0] == NULL) {
        av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");

        for (i = 0; i < 2; i++) {
            pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
            pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
            pic->f.motion_val[i]    = pic->motion_val_base[i] + 4;
        }
        pic->f.motion_subsample_log2 = 3;
        s->current_picture = *s->current_picture_ptr;
    }

    if (s->avctx->debug & FF_DEBUG_ER) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int status = s->error_status_table[mb_x + mb_y * s->mb_stride];

                av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* handle overlapping slices */
    for (error_type = 1; error_type <= 3; error_type++) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & (1 << error_type))
                end_ok = 1;
            if (error & (8 << error_type))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= 1 << error_type;

            if (error & VP_START)
                end_ok = 0;
        }
    }

    /* handle slices with partitions of different length */
    if (s->partitioned_frame) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & ER_AC_END)
                end_ok = 0;
            if ((error & ER_MV_END) ||
                (error & ER_DC_END) ||
                (error & ER_AC_ERROR))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_AC_ERROR;

            if (error & VP_START)
                end_ok = 0;
        }
    }

    /* handle missing slices */
    if (s->err_recognition & AV_EF_EXPLODE) {
        int end_ok = 1;

        // FIXME + 100 hack
        for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error1 = s->error_status_table[mb_xy];
            int error2 = s->error_status_table[s->mb_index2xy[i + 1]];

            if (error1 & VP_START)
                end_ok = 1;

            if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
                error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
                ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
                 (error1 & ER_MV_END))) {
                // end & uninit
                end_ok = 0;
            }

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_MB_ERROR;
        }
    }
    /* backward mark errors */
    distance = 9999999;
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int       error = s->error_status_table[mb_xy];

            if (!s->mbskip_table[mb_xy]) // FIXME partition specific
                distance++;
            if (error & (1 << error_type))
                distance = 0;

            if (s->partitioned_frame) {
                if (distance < threshold_part[error_type - 1])
                    s->error_status_table[mb_xy] |= 1 << error_type;
            } else {
                if (distance < threshold)
                    s->error_status_table[mb_xy] |= 1 << error_type;
            }

            if (error & VP_START)
                distance = 9999999;
        }
    }

    /* forward mark errors */
    error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int old_error   = s->error_status_table[mb_xy];

        if (old_error & VP_START) {
            error = old_error & ER_MB_ERROR;
        } else {
            error |= old_error & ER_MB_ERROR;
            s->error_status_table[mb_xy] |= error;
        }
    }

    /* handle not partitioned case */
    if (!s->partitioned_frame) {
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            error = s->error_status_table[mb_xy];
            if (error & ER_MB_ERROR)
                error |= ER_MB_ERROR;
            s->error_status_table[mb_xy] = error;
        }
    }

    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (error & ER_DC_ERROR)
            dc_error++;
        if (error & ER_AC_ERROR)
            ac_error++;
        if (error & ER_MV_ERROR)
            mv_error++;
    }
    av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n",
           dc_error, ac_error, mv_error);

    is_intra_likely = is_intra_more_likely(s);

    /* set unknown mb-type to most likely */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            continue;

        if (is_intra_likely)
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }
    // change inter to intra blocks if no reference frames are available
    if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];
            int dir           = !s->last_picture.f.data[0];

            error = s->error_status_table[mb_xy];
            if (IS_INTRA(mb_type))
                continue; // intra
            if (error & ER_MV_ERROR)
                continue; // inter with damaged MV
            if (!(error & ER_AC_ERROR))
                continue; // undamaged inter

            s->mv_dir     = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            s->mb_intra   = 0;
            s->mb_skipped = 0;
            if (IS_8X8(mb_type)) {
                int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
                int j;
                s->mv_type = MV_TYPE_8X8;
                for (j = 0; j < 4; j++) {
                    s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                    s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            } else {
                s->mv_type     = MV_TYPE_16X16;
                s->mv[0][0][0] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
                s->mv[0][0][1] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
            }

            s->dsp.clear_blocks(s->block[0]);
            s->mb_x = mb_x;
            s->mb_y = mb_y;
            decode_mb(s, 0 /* FIXME h264 partitioned slices need this set */);
        }
    }

    /* guess MVs */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int       xy      = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy   = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->current_picture.f.mb_type[mb_xy];

                error = s->error_status_table[mb_xy];
                if (IS_INTRA(mb_type))
                    continue;
                if (!(error & ER_MV_ERROR))
                    continue; // inter with undamaged MV
                if (!(error & ER_AC_ERROR))
                    continue; // undamaged inter

                s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                if (!s->last_picture.f.data[0])
                    s->mv_dir &= ~MV_DIR_FORWARD;
                if (!s->next_picture.f.data[0])
                    s->mv_dir &= ~MV_DIR_BACKWARD;
                s->mb_intra   = 0;
                s->mv_type    = MV_TYPE_16X16;
                s->mb_skipped = 0;
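                /* Derive forward/backward MVs for the damaged B-frame MB by
                 * scaling the co-located MV of the next picture with the
                 * pp/pb distances, similar to temporal direct mode. */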
                if (s->pp_time) {
                    int time_pp = s->pp_time;
                    int time_pb = s->pb_time;

                    if (s->avctx->codec_id == CODEC_ID_H264) {
                        // FIXME
                    } else {
                        ff_thread_await_progress((AVFrame *) s->next_picture_ptr, mb_y, 0);
                    }
                    s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] *  time_pb            / time_pp;
                    s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] *  time_pb            / time_pp;
                    s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
                    s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                } else {
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    s->mv[1][0][0] = 0;
                    s->mv[1][0][1] = 0;
                }

                s->dsp.clear_blocks(s->block[0]);
                s->mb_x = mb_x;
                s->mb_y = mb_y;
                decode_mb(s, 0);
            }
        }
    } else
        guess_mv(s);

    /* the filters below are not XvMC compatible, skip them */
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        goto ec_clean;

    /* fill DC for inter blocks */
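    /* Compute the per-8x8-block average of the reconstructed luma and chroma
     * pixels and store it in dc_val[], so guess_dc() can later interpolate
     * DC values for the blocks whose DC was damaged. */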
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type) && s->partitioned_frame)
                continue;
            // if (error & ER_MV_ERROR)
            //     continue; // inter data damaged FIXME is this good?

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    int x;
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * s->linesize];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            }

            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                int x;
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * s->uvlinesize];
                    dcv += dest_cr[x + y * s->uvlinesize];
                }
            }
            s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
            s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
        }
    }

    /* guess DC for damaged blocks */
    guess_dc(s, s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride, 1);
    guess_dc(s, s->dc_val[1], s->mb_width, s->mb_height, s->mb_stride, 0);
    guess_dc(s, s->dc_val[2], s->mb_width, s->mb_height, s->mb_stride, 0);

    /* filter luma DC */
    filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);

    /* render DC only intra */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTER(mb_type))
                continue;
            if (!(error & ER_AC_ERROR))
                continue; // undamaged

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }

    if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
        /* filter horizontal block boundaries */
        h_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
                       s->mb_height * 2, s->linesize, 1);
        h_block_filter(s, s->current_picture.f.data[1], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
        h_block_filter(s, s->current_picture.f.data[2], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);

        /* filter vertical block boundaries */
        v_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
                       s->mb_height * 2, s->linesize, 1);
        v_block_filter(s, s->current_picture.f.data[1], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
        v_block_filter(s, s->current_picture.f.data[2], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
    }

ec_clean:
    /* clean a few tables */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int       error = s->error_status_table[mb_xy];

        if (s->pict_type != AV_PICTURE_TYPE_B &&
            (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
            s->mbskip_table[mb_xy] = 0;
        }
        s->mbintra_table[mb_xy] = 1;
    }
}