/*
 * Error resilience / concealment
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Error resilience / concealment.
 */

#include <limits.h>

#include "libavutil/internal.h"
#include "avcodec.h"
#include "error_resilience.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "rectangle.h"
#include "thread.h"
#include "version.h"

/**
 * @param stride  the number of MVs to get to the next row
 * @param mv_step the number of MVs per row or column in a macroblock
 */
static void set_mv_strides(ERContext *s, int *mv_step, int *stride)
{
    if (s->avctx->codec_id == AV_CODEC_ID_H264) {
        av_assert0(s->quarter_sample);
        *mv_step = 4;
        *stride  = s->mb_width * 4;
    } else {
        *mv_step = 2;
        *stride  = s->b8_stride;
    }
}
/**
 * Replace the current MB with a flat dc-only version.
 */
static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int *linesize = s->cur_pic.f->linesize;
    int dc, dcu, dcv, y, i;

    for (i = 0; i < 4; i++) {
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        if (dc < 0)
            dc = 0;
        else if (dc > 2040)
            dc = 2040;
        for (y = 0; y < 8; y++) {
            int x;
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
        }
    }
    dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
    dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
    if (dcu < 0)
        dcu = 0;
    else if (dcu > 2040)
        dcu = 2040;
    if (dcv < 0)
        dcv = 0;
    else if (dcv > 2040)
        dcv = 2040;
    for (y = 0; y < 8; y++) {
        int x;
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * linesize[1]] = dcu / 8;
            dest_cr[x + y * linesize[2]] = dcv / 8;
        }
    }
}
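
/* filter181() filters the per-block DC plane in place: a 3-tap (-1, 8, -1)
 * kernel is applied along each row and then along each column. The
 * fixed-point step (dc * 10923 + 32768) >> 16 is approximately a division
 * by 6, i.e. the kernel's normalization factor (8 - 1 - 1 = 6). */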
static void filter181(int16_t *data, int width, int height, int stride)
{
    int x, y;

    /* horizontal filter */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];

        for (x = 1; x < width - 1; x++) {
            int dc;
            dc = -prev_dc +
                 data[x     + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical filter */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (y = 1; y < height - 1; y++) {
            int dc;

            dc = -prev_dc +
                 data[x +  y      * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}

/**
 * guess the dc of blocks which do not have an undamaged dc
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
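/* For every 8x8 block, the four scans below record in col[]/dist[] the DC of
 * the nearest block with a usable DC (non-intra, or intra without a DC error)
 * to its left, right, above and below, together with the distance to it;
 * 1024 and 9999 act as "nothing found yet" defaults. Each damaged intra DC is
 * then replaced by the average of those four candidates, weighted by the
 * inverse of their distance. */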
static void guess_dc(ERContext *s, int16_t *dc, int w,
                     int h, int stride, int is_luma)
{
    int b_x, b_y;
    int16_t  (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4);
    uint32_t (*dist)[4] = av_malloc_array(stride, h*sizeof(uint32_t)*4);

    if (!col || !dist) {
        av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n");
        goto fail;
    }

    for(b_y=0; b_y<h; b_y++){
        int color= 1024;
        int distance= -1;
        for(b_x=0; b_x<w; b_x++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][1]= color;
            dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_x=w-1; b_x>=0; b_x--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][0]= color;
            dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
        }
    }
    for(b_x=0; b_x<w; b_x++){
        int color= 1024;
        int distance= -1;
        for(b_y=0; b_y<h; b_y++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][3]= color;
            dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_y=h-1; b_y>=0; b_y--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][2]= color;
            dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
        }
    }

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            error    = s->error_status_table[mb_index];

            if (IS_INTER(s->cur_pic.mb_type[mb_index]))
                continue; // inter
            if (!(error & ER_DC_ERROR))
                continue; // dc-ok

            weight_sum = 0;
            guess      = 0;
            for (j = 0; j < 4; j++) {
                int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1);
                guess         += weight*(int64_t)col[b_x + b_y*stride][j];
                weight_sum    += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
        }
    }

fail:
    av_freep(&col);
    av_freep(&dist);
}

/**
 * simple horizontal deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
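/* This filter and the vertical variant below work on each 8x8 block edge
 * where at least one side is damaged (edges between two undamaged inter
 * blocks with nearly identical MVs are left alone). For every pixel pair
 * across the edge, d estimates the boundary step relative to the local
 * gradient, and up to four pixels on each damaged side are shifted by
 * decreasing fractions (7/16, 5/16, 3/16, 1/16) of d to soften the seam. */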
static void h_block_filter(ERContext *s, uint8_t *dst, int w,
                           int h, int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int y;
            int left_status  = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int left_intra   = IS_INTRA(s->cur_pic.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int right_intra  = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int left_damage  = left_status & ER_MB_ERROR;
            int right_damage = right_status & ER_MB_ERROR;
            int offset       = b_x * 8 + b_y * stride * 8;

            int16_t *left_mv  = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];

            if (!(left_damage || right_damage))
                continue; // both undamaged

            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue;

            for (y = 0; y < 8; y++) {
                int a, b, c, d;

                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(left_damage && right_damage))
                    d = d * 16 / 9;

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset +  8 + y * stride] = cm[dst[offset +  8 + y * stride] - ((d * 7) >> 4)];
                    dst[offset +  9 + y * stride] = cm[dst[offset +  9 + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}

/**
 * simple vertical deblocking filter used for error resilience
 * @param w     width in 8 pixel blocks
 * @param h     height in 8 pixel blocks
 */
static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
                           int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;

    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride  *= mvx_stride;

    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int x;
            int top_status    = s->error_status_table[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride];
            int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
            int top_intra     = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
            int bottom_intra  = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
            int top_damage    = top_status & ER_MB_ERROR;
            int bottom_damage = bottom_status & ER_MB_ERROR;
            int offset        = b_x * 8 + b_y * stride * 8;

            int16_t *top_mv    = s->cur_pic.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

            if (!(top_damage || bottom_damage))
                continue; // both undamaged

            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue;

            for (x = 0; x < 8; x++) {
                int a, b, c, d;

                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9;

                if (top_damage) {
                    dst[offset + x +  7 * stride] = cm[dst[offset + x +  7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x +  6 * stride] = cm[dst[offset + x +  6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x +  5 * stride] = cm[dst[offset + x +  5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x +  4 * stride] = cm[dst[offset + x +  4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
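
/* guess_mv() conceals inter macroblocks whose motion vectors were lost.
 * MBs that are intra or whose MV decoded cleanly are marked MV_FROZEN.
 * If MV guessing is disabled or too few MBs survived, every damaged MB is
 * simply re-rendered with a zero MV. Otherwise the damaged area is filled in
 * from its borders: candidate MVs come from the four already-fixed
 * neighbours plus their mean, their median, the zero MV and the previous
 * value; each candidate is decoded and scored by the absolute pixel
 * differences along the boundaries to fixed neighbours, and the best-scoring
 * MV is kept. Passes alternate over a checkerboard pattern until nothing
 * changes. */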
static void guess_mv(ERContext *s)
{
    uint8_t *fixed = s->er_temp_buffer;
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1
    const int mb_stride = s->mb_stride;
    const int mb_width  = s->mb_width;
    const int mb_height = s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);

    num_avail = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int f = 0;
        int error = s->error_status_table[mb_xy];

        if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
            f = MV_FROZEN; // intra // FIXME check
        if (!(error & ER_MV_ERROR))
            f = MV_FROZEN; // inter with undamaged MV

        fixed[mb_xy] = f;
        if (f == MV_FROZEN)
            num_avail++;
        else if (s->last_pic.f->data[0] && s->last_pic.motion_val[0]) {
            const int mb_y = mb_xy / s->mb_stride;
            const int mb_x = mb_xy % s->mb_stride;
            const int mot_index = (mb_x + mb_y * mot_stride) * mot_step;
            s->cur_pic.motion_val[0][mot_index][0] = s->last_pic.motion_val[0][mot_index][0];
            s->cur_pic.motion_val[0][mot_index][1] = s->last_pic.motion_val[0][mot_index][1];
            s->cur_pic.ref_index[0][4 * mb_xy]     = s->last_pic.ref_index[0][4 * mb_xy];
        }
    }

    if ((!(s->avctx->error_concealment & FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;

                if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                    continue;

                s->mv[0][0][0] = 0;
                s->mv[0][0][1] = 0;
                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
                             mb_x, mb_y, 0, 0);
            }
        }
        return;
    }

    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        none_left = 1;
        changed   = 1;
        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            int mb_x, mb_y;
            int score_sum = 0;

            changed = 0;
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int ref[8]             = { 0 };
                    int pred_count         = 0;
                    int j;
                    int best_score         = 256 * 256 * 256 * 64;
                    int best_pred          = 0;
                    const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x = 0, prev_y = 0, prev_ref = 0;

                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue;
                    if (fixed[mb_xy] == MV_FROZEN)
                        continue;

                    av_assert1(!IS_INTRA(s->cur_pic.mb_type[mb_xy]));
                    av_assert1(s->last_pic.f && s->last_pic.f->data[0]);

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (j == 0)
                        continue;

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1        ] == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1        ] == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;

                    none_left = 0;

                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index - mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy - 1)];
                        pred_count++;
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index + mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy + 1)];
                        pred_count++;
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)];
                        pred_count++;
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)];
                        pred_count++;
                    }
                    if (pred_count == 0)
                        continue;

                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;

                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
                        pred_count += 2;
                    }

skip_mean_and_median:
                    /* zero MV */
                    pred_count++;

                    if (!fixed[mb_xy] && 0) {
                        if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                            // FIXME
                        } else {
                            ff_thread_await_progress(s->last_pic.tf,
                                                     mb_y, 0);
                        }
                        if (!s->last_pic.motion_val[0] ||
                            !s->last_pic.ref_index[0])
                            goto skip_last_mv;
                        prev_x   = s->last_pic.motion_val[0][mot_index][0];
                        prev_y   = s->last_pic.motion_val[0][mot_index][1];
                        prev_ref = s->last_pic.ref_index[0][4 * mb_xy];
                    } else {
                        prev_x   = s->cur_pic.motion_val[0][mot_index][0];
                        prev_y   = s->cur_pic.motion_val[0][mot_index][1];
                        prev_ref = s->cur_pic.ref_index[0][4 * mb_xy];
                    }

                    /* last MV */
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    pred_count++;

skip_last_mv:
                    for (j = 0; j < pred_count; j++) {
                        int *linesize = s->cur_pic.f->linesize;
                        int score = 0;
                        uint8_t *src = s->cur_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                        s->cur_pic.motion_val[0][mot_index][0] =
                            s->mv[0][0][0] = mv_predictor[j][0];
                        s->cur_pic.motion_val[0][mot_index][1] =
                            s->mv[0][0][1] = mv_predictor[j][1];

                        // predictor intra or otherwise not available
                        if (ref[j] < 0)
                            continue;

                        s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD,
                                     MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);

                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] - 1] -
                                               src[k * linesize[0]]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * linesize[0] + 15] -
                                               src[k * linesize[0] + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - linesize[0]] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + linesize[0] * 15] -
                                               src[k + linesize[0] * 16]);
                        }

                        if (score <= best_score) { // <= will favor the last MV
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;
                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];

                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
                                 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
                }
            }
        }

        if (none_left)
            return;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (fixed[mb_xy])
                fixed[mb_xy] = MV_FROZEN;
        }
    }
}
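
/* Decide whether fully damaged macroblocks are better concealed spatially
 * (intra) or temporally (inter). With no usable previous frame the answer is
 * always intra. Otherwise a subset of undamaged MBs is sampled: in I-frames
 * the SAD between the current MB and the co-located MB of the previous frame
 * is weighed against the SAD of two vertically adjacent MBs within the
 * previous frame; in other frame types the coded intra/inter type of the
 * sampled MBs is used as a vote. */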
static int is_intra_more_likely(ERContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    if (!s->last_pic.f || !s->last_pic.f->data[0])
        return 1; // no previous frame available -> use spatial prediction

    if (s->avctx->error_concealment & FF_EC_FAVOR_INTER)
        return 0;

    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        const int error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            undamaged_count++;
    }

    if (s->avctx->codec_id == AV_CODEC_ID_H264 && s->ref_count <= 0)
        return 1;

    if (undamaged_count < 5)
        return 0; // almost all MBs damaged -> use temporal prediction

    // prevent dsp.sad() check, that requires access to the image
    if (CONFIG_XVMC &&
        s->avctx->hwaccel && s->avctx->hwaccel->decode_mb &&
        s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I)
        return 1;

    skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    is_intra_likely = 0;

    j = 0;
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int error;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            error = s->error_status_table[mb_xy];
            if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
                continue; // skip damaged

            j++;
            // skip a few to speed things up
            if ((j % skip_amount) != 0)
                continue;

            if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) {
                int *linesize = s->cur_pic.f->linesize;
                uint8_t *mb_ptr      = s->cur_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];
                uint8_t *last_mb_ptr = s->last_pic.f->data[0] +
                                       mb_x * 16 + mb_y * 16 * linesize[0];

                if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
                }
                is_intra_likely += s->mecc->sad[0](NULL, last_mb_ptr, mb_ptr,
                                                   linesize[0], 16);
                // FIXME need await_progress() here
                is_intra_likely -= s->mecc->sad[0](NULL, last_mb_ptr,
                                                   last_mb_ptr + linesize[0] * 16,
                                                   linesize[0], 16);
            } else {
                if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                    is_intra_likely++;
                else
                    is_intra_likely--;
            }
        }
    }
    // av_log(NULL, AV_LOG_ERROR, "is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
    return is_intra_likely > 0;
}

void ff_er_frame_start(ERContext *s)
{
    if (!s->avctx->error_concealment)
        return;

    memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
           s->mb_stride * s->mb_height * sizeof(uint8_t));
    s->error_count    = 3 * s->mb_num;
    s->error_occurred = 0;
}

static int er_supported(ERContext *s)
{
    if (s->avctx->hwaccel && s->avctx->hwaccel->decode_slice   ||
        s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU ||
        !s->cur_pic.f                                            ||
        s->cur_pic.field_picture)
        return 0;
    return 1;
}

/**
 * Add a slice.
 * @param endx   x component of the last macroblock, can be -1
 *               for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
 *               assumed that no earlier end or error of the same type occurred
 */
void ff_er_add_slice(ERContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i    = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy = s->mb_index2xy[start_i];
    const int end_xy   = s->mb_index2xy[end_i];
    int mask           = -1;

    if (s->avctx->hwaccel && s->avctx->hwaccel->decode_slice)
        return;

    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    if (!s->avctx->error_concealment)
        return;

    mask &= ~VP_START;
    if (status & (ER_AC_ERROR | ER_AC_END)) {
        mask           &= ~(ER_AC_ERROR | ER_AC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_DC_ERROR | ER_DC_END)) {
        mask           &= ~(ER_DC_ERROR | ER_DC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_MV_ERROR | ER_MV_END)) {
        mask           &= ~(ER_MV_ERROR | ER_MV_END);
        s->error_count -= end_i - start_i + 1;
    }

    if (status & ER_MB_ERROR) {
        s->error_occurred = 1;
        s->error_count    = INT_MAX;
    }

    if (mask == ~0x7F) {
        memset(&s->error_status_table[start_xy], 0,
               (end_xy - start_xy) * sizeof(uint8_t));
    } else {
        int i;
        for (i = start_xy; i < end_xy; i++)
            s->error_status_table[i] &= mask;
    }

    if (end_i == s->mb_num)
        s->error_count = INT_MAX;
    else {
        s->error_status_table[end_xy] &= mask;
        s->error_status_table[end_xy] |= status;
    }

    s->error_status_table[start_xy] |= VP_START;

    if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
        er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
        int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];

        prev_status &= ~ VP_START;
        if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) {
            s->error_occurred = 1;
            s->error_count    = INT_MAX;
        }
    }
}
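
/* ff_er_frame_end() performs the actual concealment once a frame has been
 * decoded. It first completes the per-MB error_status_table (overlapping and
 * missing slices, backward propagation of AC/DC/MV errors up to a threshold,
 * forward propagation from each resync point), then assigns a guessed MB type
 * to fully damaged MBs, re-renders inter blocks with damaged residual from
 * their existing MVs, conceals damaged MVs (guess_mv() for non-B frames,
 * scaled next-picture MVs or zero MVs for B-frames), rebuilds damaged intra
 * DCs via guess_dc()/filter181()/put_dc(), and optionally deblocks the result
 * before clearing the temporary tables. */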
void ff_er_frame_end(ERContext *s)
{
    int *linesize = NULL;
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4] = { 100, 100, 100 };
    int threshold = 50;
    int is_intra_likely;
    int size = s->b8_stride * 2 * s->mb_height;

    /* We do not support ER of field pictures yet,
     * though it should not crash if enabled. */
    if (!s->avctx->error_concealment || s->error_count == 0 ||
        s->avctx->lowres                                    ||
        !er_supported(s)                                    ||
        s->error_count == 3 * s->mb_width *
                          (s->avctx->skip_top + s->avctx->skip_bottom)) {
        return;
    }
    linesize = s->cur_pic.f->linesize;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        int status = s->error_status_table[mb_x + (s->mb_height - 1) * s->mb_stride];
        if (status != 0x7F)
            break;
    }

    if (   mb_x == s->mb_width
        && s->avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO
        && (s->avctx->height & 16)
        && s->error_count == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom + 1)
    ) {
        av_log(s->avctx, AV_LOG_DEBUG, "ignoring last missing slice\n");
        return;
    }

    if (s->last_pic.f) {
        if (s->last_pic.f->width  != s->cur_pic.f->width  ||
            s->last_pic.f->height != s->cur_pic.f->height ||
            s->last_pic.f->format != s->cur_pic.f->format) {
            av_log(s->avctx, AV_LOG_WARNING, "Cannot use previous picture in error concealment\n");
            memset(&s->last_pic, 0, sizeof(s->last_pic));
        }
    }
    if (s->next_pic.f) {
        if (s->next_pic.f->width  != s->cur_pic.f->width  ||
            s->next_pic.f->height != s->cur_pic.f->height ||
            s->next_pic.f->format != s->cur_pic.f->format) {
            av_log(s->avctx, AV_LOG_WARNING, "Cannot use next picture in error concealment\n");
            memset(&s->next_pic, 0, sizeof(s->next_pic));
        }
    }

    if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");

        for (i = 0; i < 2; i++) {
            s->ref_index_buf[i]  = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
            s->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
            if (!s->ref_index_buf[i] || !s->motion_val_buf[i])
                break;
            s->cur_pic.ref_index[i]  = s->ref_index_buf[i]->data;
            s->cur_pic.motion_val[i] = (int16_t (*)[2])s->motion_val_buf[i]->data + 4;
        }
        if (i < 2) {
            for (i = 0; i < 2; i++) {
                av_buffer_unref(&s->ref_index_buf[i]);
                av_buffer_unref(&s->motion_val_buf[i]);
                s->cur_pic.ref_index[i]  = NULL;
                s->cur_pic.motion_val[i] = NULL;
            }
            return;
        }
    }

    if (s->avctx->debug & FF_DEBUG_ER) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int status = s->error_status_table[mb_x + mb_y * s->mb_stride];

                av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

#if 1
    /* handle overlapping slices */
    for (error_type = 1; error_type <= 3; error_type++) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & (1 << error_type))
                end_ok = 1;
            if (error & (8 << error_type))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= 1 << error_type;

            if (error & VP_START)
                end_ok = 0;
        }
    }
#endif
#if 1
    /* handle slices with partitions of different length */
    if (s->partitioned_frame) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & ER_AC_END)
                end_ok = 0;
            if ((error & ER_MV_END) ||
                (error & ER_DC_END) ||
                (error & ER_AC_ERROR))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_AC_ERROR;

            if (error & VP_START)
                end_ok = 0;
        }
    }
#endif
    /* handle missing slices */
    if (s->avctx->err_recognition & AV_EF_EXPLODE) {
        int end_ok = 1;

        // FIXME + 100 hack
        for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error1 = s->error_status_table[mb_xy];
            int error2 = s->error_status_table[s->mb_index2xy[i + 1]];

            if (error1 & VP_START)
                end_ok = 1;

            if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
                error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
                ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
                 (error1 & ER_MV_END))) {
                // end & uninit
                end_ok = 0;
            }

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_MB_ERROR;
        }
    }

#if 1
    /* backward mark errors */
    distance = 9999999;
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (!s->mbskip_table[mb_xy]) // FIXME partition specific
                distance++;
            if (error & (1 << error_type))
                distance = 0;

            if (s->partitioned_frame) {
                if (distance < threshold_part[error_type - 1])
                    s->error_status_table[mb_xy] |= 1 << error_type;
            } else {
                if (distance < threshold)
                    s->error_status_table[mb_xy] |= 1 << error_type;
            }

            if (error & VP_START)
                distance = 9999999;
        }
    }
#endif

    /* forward mark errors */
    error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int old_error   = s->error_status_table[mb_xy];

        if (old_error & VP_START) {
            error = old_error & ER_MB_ERROR;
        } else {
            error |= old_error & ER_MB_ERROR;
            s->error_status_table[mb_xy] |= error;
        }
    }
#if 1
    /* handle not partitioned case */
    if (!s->partitioned_frame) {
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & ER_MB_ERROR)
                error |= ER_MB_ERROR;
            s->error_status_table[mb_xy] = error;
        }
    }
#endif

    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int error       = s->error_status_table[mb_xy];

        if (error & ER_DC_ERROR)
            dc_error++;
        if (error & ER_AC_ERROR)
            ac_error++;
        if (error & ER_MV_ERROR)
            mv_error++;
    }
    av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n",
           dc_error, ac_error, mv_error, av_get_picture_type_char(s->cur_pic.f->pict_type));

    is_intra_likely = is_intra_more_likely(s);

    /* set unknown mb-type to most likely */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int error       = s->error_status_table[mb_xy];

        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            continue;

        if (is_intra_likely)
            s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
            s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }

    // change inter to intra blocks if no reference frames are available
    if (!(s->last_pic.f && s->last_pic.f->data[0]) &&
        !(s->next_pic.f && s->next_pic.f->data[0]))
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            if (!IS_INTRA(s->cur_pic.mb_type[mb_xy]))
                s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic.mb_type[mb_xy];
            const int dir     = !(s->last_pic.f && s->last_pic.f->data[0]);
            const int mv_dir  = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            int mv_type;

            int error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type))
                continue; // intra
            if (error & ER_MV_ERROR)
                continue; // inter with damaged MV
            if (!(error & ER_AC_ERROR))
                continue; // undamaged inter

            if (IS_8X8(mb_type)) {
                int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
                int j;
                mv_type = MV_TYPE_8X8;
                for (j = 0; j < 4; j++) {
                    s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                    s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            } else {
                mv_type        = MV_TYPE_16X16;
                s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
                s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
            }

            s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
                         mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
        }
    }

    /* guess MVs */
    if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int       xy      = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy   = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->cur_pic.mb_type[mb_xy];
                int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;

                int error = s->error_status_table[mb_xy];

                if (IS_INTRA(mb_type))
                    continue;
                if (!(error & ER_MV_ERROR))
                    continue; // inter with undamaged MV
                if (!(error & ER_AC_ERROR))
                    continue; // undamaged inter

                if (!(s->last_pic.f && s->last_pic.f->data[0]))
                    mv_dir &= ~MV_DIR_FORWARD;
                if (!(s->next_pic.f && s->next_pic.f->data[0]))
                    mv_dir &= ~MV_DIR_BACKWARD;

                if (s->pp_time) {
                    int time_pp = s->pp_time;
                    int time_pb = s->pb_time;

                    av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264);
                    ff_thread_await_progress(s->next_pic.tf, mb_y, 0);

                    s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] *  time_pb            / time_pp;
                    s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] *  time_pb            / time_pp;
                    s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
                    s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                } else {
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    s->mv[1][0][0] = 0;
                    s->mv[1][0][1] = 0;
                }

                s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
                             mb_x, mb_y, 0, 0);
            }
        }
    } else
        guess_mv(s);

    /* the filters below manipulate raw image, skip them */
    if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb)
        goto ec_clean;

    /* fill DC for inter blocks */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic.mb_type[mb_xy];

            // error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type) && s->partitioned_frame)
                continue;
            // if (error & ER_MV_ERROR)
            //     continue; // inter data damaged FIXME is this good?

            dest_y  = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic.f->data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic.f->data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    int x;
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * linesize[0]];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            }

            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                int x;
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * linesize[1]];
                    dcv += dest_cr[x + y * linesize[2]];
                }
            }
            s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
            s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
        }
    }
#if 1
    /* guess DC for damaged blocks */
    guess_dc(s, s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride, 1);
    guess_dc(s, s->dc_val[1], s->mb_width,     s->mb_height,     s->mb_stride, 0);
    guess_dc(s, s->dc_val[2], s->mb_width,     s->mb_height,     s->mb_stride, 0);
#endif

    /* filter luma DC */
    filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);

#if 1
    /* render DC only intra */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->cur_pic.mb_type[mb_xy];

            int error = s->error_status_table[mb_xy];

            if (IS_INTER(mb_type))
                continue;
            if (!(error & ER_AC_ERROR))
                continue; // undamaged

            dest_y  = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
            dest_cb = s->cur_pic.f->data[1] + mb_x *  8 + mb_y *  8 * linesize[1];
            dest_cr = s->cur_pic.f->data[2] + mb_x *  8 + mb_y *  8 * linesize[2];

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }
#endif

    if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
        /* filter horizontal block boundaries */
        h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);

        h_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);

        h_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);

        /* filter vertical block boundaries */
        v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
                       s->mb_height * 2, linesize[0], 1);

        v_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
                       s->mb_height, linesize[1], 0);

        v_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
                       s->mb_height, linesize[2], 0);
    }

ec_clean:
    /* clean a few tables */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int       error = s->error_status_table[mb_xy];

        if (s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B &&
            (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
            s->mbskip_table[mb_xy] = 0;
        }
        s->mbintra_table[mb_xy] = 1;
    }

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&s->ref_index_buf[i]);
        av_buffer_unref(&s->motion_val_buf[i]);
        s->cur_pic.ref_index[i]  = NULL;
        s->cur_pic.motion_val[i] = NULL;
    }

    memset(&s->cur_pic,  0, sizeof(ERPicture));
    memset(&s->last_pic, 0, sizeof(ERPicture));
    memset(&s->next_pic, 0, sizeof(ERPicture));
}