/*
 * Error resilience / concealment
 *
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Error resilience / concealment.
 */

#include <limits.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"

/*
 * H.264 redefines mb_intra so that it is not mistakenly used (it is
 * uninitialized in H.264), but error concealment must support both
 * H.264 and H.263, so we undo the redefinition here.
 */
#undef mb_intra

static void decode_mb(MpegEncContext *s, int ref)
{
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    ff_init_block_index(s);
    ff_update_block_index(s);
    s->dest[1] += (16 >> s->chroma_x_shift) - 8;
    s->dest[2] += (16 >> s->chroma_x_shift) - 8;

    if (CONFIG_H264_DECODER && s->codec_id == AV_CODEC_ID_H264) {
        H264Context *h = (void*)s;
        h->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
        memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
        av_assert1(ref >= 0);
        /* FIXME: It is possible albeit uncommon that slice references
         * differ between slices. We take the easy approach and ignore
         * it for now. If this turns out to have any relevance in
         * practice then correct remapping should be added. */
        if (ref >= h->ref_count[0])
            ref = 0;
        if (!h->ref_list[0][ref].f.data[0]) {
            av_log(s->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
            ref = 0;
        }
        fill_rectangle(&s->current_picture.f.ref_index[0][4 * h->mb_xy],
                       2, 2, 2, ref, 1);
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
        fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
                       pack16to32(s->mv[0][0][0], s->mv[0][0][1]), 4);
        h->mb_mbaff =
        h->mb_field_decoding_flag = 0;

        ff_h264_hl_decode_mb(h);
    } else {
        assert(ref == 0);
        ff_MPV_decode_mb(s, s->block);
    }
}

/**
 * @param stride the number of MVs to get to the next row
 * @param mv_step the number of MVs per row or column in a macroblock
 */
static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride)
{
    if (s->codec_id == AV_CODEC_ID_H264) {
        H264Context *h = (void*)s;

        av_assert0(s->quarter_sample);
        *mv_step = 4;
        *stride  = h->b_stride;
    } else {
        *mv_step = 2;
        *stride  = s->b8_stride;
    }
}

/**
 * Replace the current MB with a flat dc-only version.
 */
static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                   uint8_t *dest_cr, int mb_x, int mb_y)
{
    int dc, dcu, dcv, y, i;
    for (i = 0; i < 4; i++) {
        dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
        if (dc < 0)
            dc = 0;
        else if (dc > 2040)
            dc = 2040;
        for (y = 0; y < 8; y++) {
            int x;
            for (x = 0; x < 8; x++)
                dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * s->linesize] = dc / 8;
        }
    }
    dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
    dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
    if (dcu < 0)
        dcu = 0;
    else if (dcu > 2040)
        dcu = 2040;
    if (dcv < 0)
        dcv = 0;
    else if (dcv > 2040)
        dcv = 2040;
    for (y = 0; y < 8; y++) {
        int x;
        for (x = 0; x < 8; x++) {
            dest_cb[x + y * s->uvlinesize] = dcu / 8;
            dest_cr[x + y * s->uvlinesize] = dcv / 8;
        }
    }
}
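
/**
 * In-place separable filter over the DC plane: each interior sample is
 * replaced by (-left + 8 * cur - right) / 6 (the division is implemented
 * as a fixed-point multiply), first along rows and then along columns.
 */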
static void filter181(int16_t *data, int width, int height, int stride)
{
    int x, y;

    /* horizontal filter */
    for (y = 1; y < height - 1; y++) {
        int prev_dc = data[0 + y * stride];

        for (x = 1; x < width - 1; x++) {
            int dc;
            dc = -prev_dc +
                 data[x + y * stride] * 8 -
                 data[x + 1 + y * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical filter */
    for (x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (y = 1; y < height - 1; y++) {
            int dc;
            dc = -prev_dc +
                 data[x + y * stride] * 8 -
                 data[x + (y + 1) * stride];
            dc = (dc * 10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}

/**
 * guess the dc of blocks which do not have an undamaged dc
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 */
static void guess_dc(MpegEncContext *s, int16_t *dc, int w,
                     int h, int stride, int is_luma)
{
    int b_x, b_y;
    int16_t  (*col )[4] = av_malloc(stride*h*sizeof( int16_t)*4);
    uint32_t (*dist)[4] = av_malloc(stride*h*sizeof(uint32_t)*4);

    if(!col || !dist) {
        av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n");
        goto fail;
    }
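
    /* For every block, scan each row and each column in both directions and
     * record, per direction, the DC of the nearest block with a usable DC
     * (any non-intra block, or an intra block without a DC error) together
     * with its distance (9999 if none was found). */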
    for(b_y=0; b_y<h; b_y++){
        int color= 1024;
        int distance= -1;
        for(b_x=0; b_x<w; b_x++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][1]= color;
            dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_x=w-1; b_x>=0; b_x--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_x;
            }
            col [b_x + b_y*stride][0]= color;
            dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
        }
    }
    for(b_x=0; b_x<w; b_x++){
        int color= 1024;
        int distance= -1;
        for(b_y=0; b_y<h; b_y++){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][3]= color;
            dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
        }
        color= 1024;
        distance= -1;
        for(b_y=h-1; b_y>=0; b_y--){
            int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
            int error_j= s->error_status_table[mb_index_j];
            int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
            if(intra_j==0 || !(error_j&ER_DC_ERROR)){
                color= dc[b_x + b_y*stride];
                distance= b_y;
            }
            col [b_x + b_y*stride][2]= color;
            dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
        }
    }
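
    /* For each damaged intra block, estimate the DC as a weighted mean of
     * the four recorded neighbour DCs, with weights inversely proportional
     * to their distances. */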
    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int mb_index, error, j;
            int64_t guess, weight_sum;

            mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
            error    = s->error_status_table[mb_index];

            if (IS_INTER(s->current_picture.f.mb_type[mb_index]))
                continue; // inter
            if (!(error & ER_DC_ERROR))
                continue; // dc-ok

            weight_sum = 0;
            guess      = 0;
            for (j = 0; j < 4; j++) {
                int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1);
                guess      += weight*(int64_t)col[b_x + b_y*stride][j];
                weight_sum += weight;
            }
            guess = (guess + weight_sum / 2) / weight_sum;
            dc[b_x + b_y * stride] = guess;
        }
    }

fail:
    av_freep(&col);
    av_freep(&dist);
}

/**
 * simple horizontal deblocking filter used for error resilience
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 */
static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w,
                           int h, int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for (b_y = 0; b_y < h; b_y++) {
        for (b_x = 0; b_x < w - 1; b_x++) {
            int y;
            int left_status  = s->error_status_table[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
            int left_intra   = IS_INTRA(s->current_picture.f.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int right_intra  = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
            int left_damage  = left_status & ER_MB_ERROR;
            int right_damage = right_status & ER_MB_ERROR;
            int offset       = b_x * 8 + b_y * stride * 8;
            int16_t *left_mv  = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride *  b_x];
            int16_t *right_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];

            if (!(left_damage || right_damage))
                continue; // both undamaged

            if ((!left_intra) && (!right_intra) &&
                FFABS(left_mv[0] - right_mv[0]) +
                FFABS(left_mv[1] + right_mv[1]) < 2)
                continue;
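
            /* Smooth each 8-pixel boundary row: the correction d is the
             * absolute step across the edge minus the average absolute step
             * just inside each side, clipped to the sign of the edge step,
             * and is spread over the four pixels on each damaged side with
             * weights 7/16, 5/16, 3/16 and 1/16. */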
            for (y = 0; y < 8; y++) {
                int a, b, c, d;

                a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
                b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
                c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(left_damage && right_damage))
                    d = d * 16 / 9;

                if (left_damage) {
                    dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
                    dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
                    dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
                    dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
                }
                if (right_damage) {
                    dst[offset + 8  + y * stride] = cm[dst[offset +  8 + y * stride] - ((d * 7) >> 4)];
                    dst[offset + 9  + y * stride] = cm[dst[offset +  9 + y * stride] - ((d * 5) >> 4)];
                    dst[offset + 10 + y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
                    dst[offset + 11 + y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}

/**
 * simple vertical deblocking filter used for error resilience
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 */
static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h,
                           int stride, int is_luma)
{
    int b_x, b_y, mvx_stride, mvy_stride;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    set_mv_strides(s, &mvx_stride, &mvy_stride);
    mvx_stride >>= is_luma;
    mvy_stride *= mvx_stride;

    for (b_y = 0; b_y < h - 1; b_y++) {
        for (b_x = 0; b_x < w; b_x++) {
            int x;
            int top_status    = s->error_status_table[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride];
            int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
            int top_intra     = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
            int bottom_intra  = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
            int top_damage    = top_status & ER_MB_ERROR;
            int bottom_damage = bottom_status & ER_MB_ERROR;
            int offset        = b_x * 8 + b_y * stride * 8;
            int16_t *top_mv    = s->current_picture.f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
            int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];

            if (!(top_damage || bottom_damage))
                continue; // both undamaged

            if ((!top_intra) && (!bottom_intra) &&
                FFABS(top_mv[0] - bottom_mv[0]) +
                FFABS(top_mv[1] + bottom_mv[1]) < 2)
                continue;

            for (x = 0; x < 8; x++) {
                int a, b, c, d;

                a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
                b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
                c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];

                d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
                d = FFMAX(d, 0);
                if (b < 0)
                    d = -d;

                if (d == 0)
                    continue;

                if (!(top_damage && bottom_damage))
                    d = d * 16 / 9;

                if (top_damage) {
                    dst[offset + x +  7 * stride] = cm[dst[offset + x +  7 * stride] + ((d * 7) >> 4)];
                    dst[offset + x +  6 * stride] = cm[dst[offset + x +  6 * stride] + ((d * 5) >> 4)];
                    dst[offset + x +  5 * stride] = cm[dst[offset + x +  5 * stride] + ((d * 3) >> 4)];
                    dst[offset + x +  4 * stride] = cm[dst[offset + x +  4 * stride] + ((d * 1) >> 4)];
                }
                if (bottom_damage) {
                    dst[offset + x +  8 * stride] = cm[dst[offset + x +  8 * stride] - ((d * 7) >> 4)];
                    dst[offset + x +  9 * stride] = cm[dst[offset + x +  9 * stride] - ((d * 5) >> 4)];
                    dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
                    dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
                }
            }
        }
    }
}
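
/**
 * Guess the motion vectors of damaged inter macroblocks from the MVs of
 * their undamaged (or already concealed) neighbours.
 */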
static void guess_mv(MpegEncContext *s)
{
    uint8_t *fixed = s->er_temp_buffer;
#define MV_FROZEN    3
#define MV_CHANGED   2
#define MV_UNCHANGED 1
    const int mb_stride = s->mb_stride;
    const int mb_width  = s->mb_width;
    const int mb_height = s->mb_height;
    int i, depth, num_avail;
    int mb_x, mb_y, mot_step, mot_stride;

    set_mv_strides(s, &mot_step, &mot_stride);

    num_avail = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int f = 0;
        int error = s->error_status_table[mb_xy];

        if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
            f = MV_FROZEN; // intra // FIXME check
        if (!(error & ER_MV_ERROR))
            f = MV_FROZEN; // inter with undamaged MV

        fixed[mb_xy] = f;
        if (f == MV_FROZEN)
            num_avail++;
        else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){
            const int mb_y= mb_xy / s->mb_stride;
            const int mb_x= mb_xy % s->mb_stride;
            const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
            s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0];
            s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1];
            s->current_picture.f.ref_index[0][4*mb_xy]      = s->last_picture.f.ref_index[0][4*mb_xy];
        }
    }

    if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;

                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
                    continue;

                s->mv_dir     = s->last_picture.f.data[0] ? MV_DIR_FORWARD
                                                          : MV_DIR_BACKWARD;
                s->mb_intra   = 0;
                s->mv_type    = MV_TYPE_16X16;
                s->mb_skipped = 0;

                s->dsp.clear_blocks(s->block[0]);

                s->mb_x = mb_x;
                s->mb_y = mb_y;
                s->mv[0][0][0] = 0;
                s->mv[0][0][1] = 0;
                decode_mb(s, 0);
            }
        }
        return;
    }
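
    /* Iteratively propagate MVs inward from undamaged macroblocks: each pass
     * visits damaged MBs that border a frozen or recently changed one, tries
     * the neighbour MVs plus their mean and median, the zero MV and the
     * block's previously stored MV, and keeps the candidate whose decoded
     * result best matches the already-fixed neighbours along the shared
     * borders. */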
    for (depth = 0; ; depth++) {
        int changed, pass, none_left;

        none_left = 1;
        changed   = 1;
        for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
            int mb_x, mb_y;
            int score_sum = 0;

            changed = 0;
            for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
                for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    const int mb_xy        = mb_x + mb_y * s->mb_stride;
                    int mv_predictor[8][2] = { { 0 } };
                    int ref[8]             = { 0 };
                    int pred_count         = 0;
                    int j;
                    int best_score         = 256 * 256 * 256 * 64;
                    int best_pred          = 0;
                    const int mot_index    = (mb_x + mb_y * mot_stride) * mot_step;
                    int prev_x, prev_y, prev_ref;

                    if ((mb_x ^ mb_y ^ pass) & 1)
                        continue;
                    if (fixed[mb_xy] == MV_FROZEN)
                        continue;

                    av_assert1(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
                    av_assert1(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_FROZEN)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_FROZEN)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN)
                        j = 1;
                    if (j == 0)
                        continue;

                    j = 0;
                    if (mb_x > 0             && fixed[mb_xy - 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_x + 1 < mb_width  && fixed[mb_xy + 1]         == MV_CHANGED)
                        j = 1;
                    if (mb_y > 0             && fixed[mb_xy - mb_stride] == MV_CHANGED)
                        j = 1;
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED)
                        j = 1;
                    if (j == 0 && pass > 1)
                        continue;

                    none_left = 0;

                    if (mb_x > 0 && fixed[mb_xy - 1]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index - mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index - mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy - 1)];
                        pred_count++;
                    }
                    if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index + mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index + mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy + 1)];
                        pred_count++;
                    }
                    if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index - mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy - s->mb_stride)];
                        pred_count++;
                    }
                    if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                        mv_predictor[pred_count][0] =
                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][0];
                        mv_predictor[pred_count][1] =
                            s->current_picture.f.motion_val[0][mot_index + mot_stride * mot_step][1];
                        ref[pred_count] =
                            s->current_picture.f.ref_index[0][4 * (mb_xy + s->mb_stride)];
                        pred_count++;
                    }
                    if (pred_count == 0)
                        continue;

                    if (pred_count > 1) {
                        int sum_x = 0, sum_y = 0, sum_r = 0;
                        int max_x, max_y, min_x, min_y, max_r, min_r;

                        for (j = 0; j < pred_count; j++) {
                            sum_x += mv_predictor[j][0];
                            sum_y += mv_predictor[j][1];
                            sum_r += ref[j];
                            if (j && ref[j] != ref[j - 1])
                                goto skip_mean_and_median;
                        }

                        /* mean */
                        mv_predictor[pred_count][0] = sum_x / j;
                        mv_predictor[pred_count][1] = sum_y / j;
                        ref[pred_count]             = sum_r / j;

                        /* median */
                        if (pred_count >= 3) {
                            min_y = min_x = min_r =  99999;
                            max_y = max_x = max_r = -99999;
                        } else {
                            min_x = min_y = max_x = max_y = min_r = max_r = 0;
                        }
                        for (j = 0; j < pred_count; j++) {
                            max_x = FFMAX(max_x, mv_predictor[j][0]);
                            max_y = FFMAX(max_y, mv_predictor[j][1]);
                            max_r = FFMAX(max_r, ref[j]);
                            min_x = FFMIN(min_x, mv_predictor[j][0]);
                            min_y = FFMIN(min_y, mv_predictor[j][1]);
                            min_r = FFMIN(min_r, ref[j]);
                        }
                        mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
                        mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
                        ref[pred_count + 1]             = sum_r - max_r - min_r;

                        if (pred_count == 4) {
                            mv_predictor[pred_count + 1][0] /= 2;
                            mv_predictor[pred_count + 1][1] /= 2;
                            ref[pred_count + 1]             /= 2;
                        }
                        pred_count += 2;
                    }

skip_mean_and_median:
                    /* zero MV */
                    pred_count++;

                    if (!fixed[mb_xy] && 0) {
                        if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                            // FIXME
                        } else {
                            ff_thread_await_progress(&s->last_picture_ptr->f,
                                                     mb_y, 0);
                        }
                        if (!s->last_picture.f.motion_val[0] ||
                            !s->last_picture.f.ref_index[0])
                            goto skip_last_mv;
                        prev_x   = s->last_picture.f.motion_val[0][mot_index][0];
                        prev_y   = s->last_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->last_picture.f.ref_index[0][4 * mb_xy];
                    } else {
                        prev_x   = s->current_picture.f.motion_val[0][mot_index][0];
                        prev_y   = s->current_picture.f.motion_val[0][mot_index][1];
                        prev_ref = s->current_picture.f.ref_index[0][4 * mb_xy];
                    }

                    /* last MV */
                    mv_predictor[pred_count][0] = prev_x;
                    mv_predictor[pred_count][1] = prev_y;
                    ref[pred_count]             = prev_ref;
                    pred_count++;

skip_last_mv:
                    s->mv_dir     = MV_DIR_FORWARD;
                    s->mb_intra   = 0;
                    s->mv_type    = MV_TYPE_16X16;
                    s->mb_skipped = 0;

                    s->dsp.clear_blocks(s->block[0]);

                    s->mb_x = mb_x;
                    s->mb_y = mb_y;
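
                    /* Try each candidate MV: decode the MB with it and score
                     * it by the summed absolute difference along the borders
                     * shared with already-fixed neighbours. */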
                    for (j = 0; j < pred_count; j++) {
                        int score = 0;
                        uint8_t *src = s->current_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;

                        s->current_picture.f.motion_val[0][mot_index][0] =
                            s->mv[0][0][0] = mv_predictor[j][0];
                        s->current_picture.f.motion_val[0][mot_index][1] =
                            s->mv[0][0][1] = mv_predictor[j][1];

                        // predictor intra or otherwise not available
                        if (ref[j] < 0)
                            continue;

                        decode_mb(s, ref[j]);

                        if (mb_x > 0 && fixed[mb_xy - 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * s->linesize - 1] -
                                               src[k * s->linesize]);
                        }
                        if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k * s->linesize + 15] -
                                               src[k * s->linesize + 16]);
                        }
                        if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k - s->linesize] - src[k]);
                        }
                        if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) {
                            int k;
                            for (k = 0; k < 16; k++)
                                score += FFABS(src[k + s->linesize * 15] -
                                               src[k + s->linesize * 16]);
                        }

                        if (score <= best_score) { // <= will favor the last MV
                            best_score = score;
                            best_pred  = j;
                        }
                    }
                    score_sum += best_score;
                    s->mv[0][0][0] = mv_predictor[best_pred][0];
                    s->mv[0][0][1] = mv_predictor[best_pred][1];
                    for (i = 0; i < mot_step; i++)
                        for (j = 0; j < mot_step; j++) {
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                        }

                    decode_mb(s, ref[best_pred]);

                    if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
                        fixed[mb_xy] = MV_CHANGED;
                        changed++;
                    } else
                        fixed[mb_xy] = MV_UNCHANGED;
                }
            }

            // printf(".%d/%d", changed, score_sum); fflush(stdout);
        }

        if (none_left)
            return;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (fixed[mb_xy])
                fixed[mb_xy] = MV_FROZEN;
        }
        // printf(":"); fflush(stdout);
    }
}
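
/**
 * Heuristically decide whether damaged MBs should be concealed spatially
 * (intra) or temporally (inter): for I-frames by comparing sampled undamaged
 * MBs against the previous frame, otherwise by counting how many undamaged
 * MBs are intra vs inter coded.
 */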
static int is_intra_more_likely(MpegEncContext *s)
{
    int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;

    if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0])
        return 1; // no previous frame available -> use spatial prediction

    undamaged_count = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        const int error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            undamaged_count++;
    }

    if (s->codec_id == AV_CODEC_ID_H264) {
        H264Context *h = (void*) s;
        if (h->list_count <= 0 || h->ref_count[0] <= 0 ||
            !h->ref_list[0][0].f.data[0])
            return 1;
    }

    if (undamaged_count < 5)
        return 0; // almost all MBs damaged -> use temporal prediction

    // prevent dsp.sad() check, that requires access to the image
    if (CONFIG_MPEG_XVMC_DECODER &&
        s->avctx->xvmc_acceleration &&
        s->pict_type == AV_PICTURE_TYPE_I)
        return 1;

    skip_amount     = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
    is_intra_likely = 0;

    j = 0;
    for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int error;
            const int mb_xy = mb_x + mb_y * s->mb_stride;

            error = s->error_status_table[mb_xy];
            if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
                continue; // skip damaged

            j++;
            // skip a few to speed things up
            if ((j % skip_amount) != 0)
                continue;

            if (s->pict_type == AV_PICTURE_TYPE_I) {
                uint8_t *mb_ptr      = s->current_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;
                uint8_t *last_mb_ptr = s->last_picture.f.data[0] +
                                       mb_x * 16 + mb_y * 16 * s->linesize;

                if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                    // FIXME
                } else {
                    ff_thread_await_progress(&s->last_picture_ptr->f,
                                             mb_y, 0);
                }
                is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr,
                                                 s->linesize, 16);
                // FIXME need await_progress() here
                is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr,
                                                 last_mb_ptr + s->linesize * 16,
                                                 s->linesize, 16);
            } else {
                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    is_intra_likely++;
                else
                    is_intra_likely--;
            }
        }
    }
    // printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
    return is_intra_likely > 0;
}
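
/**
 * Reset the error state at the start of a frame: mark every macroblock as
 * damaged until the slices that cover it report otherwise.
 */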
void ff_er_frame_start(MpegEncContext *s)
{
    if (!s->err_recognition)
        return;

    memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
           s->mb_stride * s->mb_height * sizeof(uint8_t));
    s->error_count    = 3 * s->mb_num;
    s->error_occurred = 0;
}

/**
 * Add a slice.
 * @param endx   x component of the last macroblock, can be -1
 *               for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
 *               assumed that no earlier end or error of the same type occurred
 */
void ff_er_add_slice(MpegEncContext *s, int startx, int starty,
                     int endx, int endy, int status)
{
    const int start_i  = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
    const int end_i    = av_clip(endx   + endy   * s->mb_width, 0, s->mb_num);
    const int start_xy = s->mb_index2xy[start_i];
    const int end_xy   = s->mb_index2xy[end_i];
    int mask           = -1;

    if (s->avctx->hwaccel)
        return;

    if (start_i > end_i || start_xy > end_xy) {
        av_log(s->avctx, AV_LOG_ERROR,
               "internal error, slice end before start\n");
        return;
    }

    if (!s->err_recognition)
        return;

    mask &= ~VP_START;
    if (status & (ER_AC_ERROR | ER_AC_END)) {
        mask           &= ~(ER_AC_ERROR | ER_AC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_DC_ERROR | ER_DC_END)) {
        mask           &= ~(ER_DC_ERROR | ER_DC_END);
        s->error_count -= end_i - start_i + 1;
    }
    if (status & (ER_MV_ERROR | ER_MV_END)) {
        mask           &= ~(ER_MV_ERROR | ER_MV_END);
        s->error_count -= end_i - start_i + 1;
    }

    if (status & ER_MB_ERROR) {
        s->error_occurred = 1;
        s->error_count    = INT_MAX;
    }

    if (mask == ~0x7F) {
        memset(&s->error_status_table[start_xy], 0,
               (end_xy - start_xy) * sizeof(uint8_t));
    } else {
        int i;
        for (i = start_xy; i < end_xy; i++)
            s->error_status_table[i] &= mask;
    }

    if (end_i == s->mb_num)
        s->error_count = INT_MAX;
    else {
        s->error_status_table[end_xy] &= mask;
        s->error_status_table[end_xy] |= status;
    }

    s->error_status_table[start_xy] |= VP_START;

    if (start_xy > 0 && s->avctx->thread_count <= 1 &&
        s->avctx->skip_top * s->mb_width < start_i) {
        int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];

        prev_status &= ~VP_START;
        if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
            s->error_count = INT_MAX;
    }
}
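
/**
 * Conceal the remaining errors once the whole frame has been decoded:
 * propagate error flags across slices, decide between intra and inter
 * concealment, guess missing MVs and DC values, and optionally deblock
 * across damaged block boundaries.
 */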
void ff_er_frame_end(MpegEncContext *s)
{
    int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
    int distance;
    int threshold_part[4] = { 100, 100, 100 };
    int threshold         = 50;
    int is_intra_likely;
    int size = s->b8_stride * 2 * s->mb_height;
    Picture *pic = s->current_picture_ptr;

    /* We do not support ER of field pictures yet,
     * though it should not crash if enabled. */
    if (!s->err_recognition || s->error_count == 0 || s->avctx->lowres ||
        s->avctx->hwaccel ||
        s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
        s->picture_structure != PICT_FRAME ||
        s->error_count == 3 * s->mb_width *
                          (s->avctx->skip_top + s->avctx->skip_bottom)) {
        return;
    }

    if (s->current_picture.f.motion_val[0] == NULL) {
        av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");

        for (i = 0; i < 2; i++) {
            pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
            pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
            pic->f.motion_val[i]    = pic->motion_val_base[i] + 4;
        }
        pic->f.motion_subsample_log2 = 3;
        s->current_picture = *s->current_picture_ptr;
    }

    if (s->avctx->debug & FF_DEBUG_ER) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int status = s->error_status_table[mb_x + mb_y * s->mb_stride];

                av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

#if 1
    /* handle overlapping slices */
    for (error_type = 1; error_type <= 3; error_type++) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & (1 << error_type))
                end_ok = 1;
            if (error & (8 << error_type))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= 1 << error_type;

            if (error & VP_START)
                end_ok = 0;
        }
    }
#endif
#if 1
    /* handle slices with partitions of different length */
    if (s->partitioned_frame) {
        int end_ok = 0;

        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (error & ER_AC_END)
                end_ok = 0;
            if ((error & ER_MV_END) ||
                (error & ER_DC_END) ||
                (error & ER_AC_ERROR))
                end_ok = 1;

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_AC_ERROR;

            if (error & VP_START)
                end_ok = 0;
        }
    }
#endif
    /* handle missing slices */
    if (s->err_recognition & AV_EF_EXPLODE) {
        int end_ok = 1;

        // FIXME + 100 hack
        for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error1 = s->error_status_table[mb_xy];
            int error2 = s->error_status_table[s->mb_index2xy[i + 1]];

            if (error1 & VP_START)
                end_ok = 1;

            if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
                error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
                ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
                 (error1 & ER_MV_END))) {
                // end & uninit
                end_ok = 0;
            }

            if (!end_ok)
                s->error_status_table[mb_xy] |= ER_MB_ERROR;
        }
    }

#if 1
    /* backward mark errors */
    distance = 9999999;
    for (error_type = 1; error_type <= 3; error_type++) {
        for (i = s->mb_num - 1; i >= 0; i--) {
            const int mb_xy = s->mb_index2xy[i];
            int error       = s->error_status_table[mb_xy];

            if (!s->mbskip_table[mb_xy]) // FIXME partition specific
                distance++;
            if (error & (1 << error_type))
                distance = 0;

            if (s->partitioned_frame) {
                if (distance < threshold_part[error_type - 1])
                    s->error_status_table[mb_xy] |= 1 << error_type;
            } else {
                if (distance < threshold)
                    s->error_status_table[mb_xy] |= 1 << error_type;
            }

            if (error & VP_START)
                distance = 9999999;
        }
    }
#endif

    /* forward mark errors */
    error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int old_error   = s->error_status_table[mb_xy];

        if (old_error & VP_START) {
            error = old_error & ER_MB_ERROR;
        } else {
            error |= old_error & ER_MB_ERROR;
            s->error_status_table[mb_xy] |= error;
        }
    }
#if 1
    /* handle not partitioned case */
    if (!s->partitioned_frame) {
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            error = s->error_status_table[mb_xy];
            if (error & ER_MB_ERROR)
                error |= ER_MB_ERROR;
            s->error_status_table[mb_xy] = error;
        }
    }
#endif
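
    /* count the DC/AC/MV errors that are left to conceal, for the log
     * message below */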
    dc_error = ac_error = mv_error = 0;
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (error & ER_DC_ERROR)
            dc_error++;
        if (error & ER_AC_ERROR)
            ac_error++;
        if (error & ER_MV_ERROR)
            mv_error++;
    }
    av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n",
           dc_error, ac_error, mv_error, av_get_picture_type_char(s->pict_type));

    is_intra_likely = is_intra_more_likely(s);

    /* set unknown mb-type to most likely */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        error = s->error_status_table[mb_xy];
        if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
            continue;

        if (is_intra_likely)
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        else
            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
    }

    // change inter to intra blocks if no reference frames are available
    if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
        for (i = 0; i < s->mb_num; i++) {
            const int mb_xy = s->mb_index2xy[i];
            if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
        }

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];
            int dir           = !s->last_picture.f.data[0];

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type))
                continue; // intra
            if (error & ER_MV_ERROR)
                continue; // inter with damaged MV
            if (!(error & ER_AC_ERROR))
                continue; // undamaged inter

            s->mv_dir     = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
            s->mb_intra   = 0;
            s->mb_skipped = 0;
            if (IS_8X8(mb_type)) {
                int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
                int j;
                s->mv_type = MV_TYPE_8X8;
                for (j = 0; j < 4; j++) {
                    s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
                    s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                }
            } else {
                s->mv_type     = MV_TYPE_16X16;
                s->mv[0][0][0] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
                s->mv[0][0][1] = s->current_picture.f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
            }

            s->dsp.clear_blocks(s->block[0]);
            s->mb_x = mb_x;
            s->mb_y = mb_y;
            decode_mb(s, 0 /* FIXME h264 partitioned slices need this set */);
        }
    }

    /* guess MVs */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int       xy      = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy   = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->current_picture.f.mb_type[mb_xy];

                error = s->error_status_table[mb_xy];

                if (IS_INTRA(mb_type))
                    continue;
                if (!(error & ER_MV_ERROR))
                    continue; // inter with undamaged MV
                if (!(error & ER_AC_ERROR))
                    continue; // undamaged inter

                s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                if (!s->last_picture.f.data[0])
                    s->mv_dir &= ~MV_DIR_FORWARD;
                if (!s->next_picture.f.data[0])
                    s->mv_dir &= ~MV_DIR_BACKWARD;
                s->mb_intra   = 0;
                s->mv_type    = MV_TYPE_16X16;
                s->mb_skipped = 0;
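
                /* Derive the MVs from the co-located MV of the next reference
                 * frame, scaled by the relative temporal position (similar to
                 * B-frame direct mode): forward = mv * time_pb / time_pp,
                 * backward = mv * (time_pb - time_pp) / time_pp. */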
                if (s->pp_time) {
                    int time_pp = s->pp_time;
                    int time_pb = s->pb_time;

                    if (s->avctx->codec_id == AV_CODEC_ID_H264) {
                        // FIXME
                    } else {
                        ff_thread_await_progress(&s->next_picture_ptr->f, mb_y, 0);
                    }
                    s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] *  time_pb            / time_pp;
                    s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] *  time_pb            / time_pp;
                    s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
                    s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                } else {
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    s->mv[1][0][0] = 0;
                    s->mv[1][0][1] = 0;
                }

                s->dsp.clear_blocks(s->block[0]);
                s->mb_x = mb_x;
                s->mb_y = mb_y;
                decode_mb(s, 0);
            }
        }
    } else
        guess_mv(s);

    /* the filters below are not XvMC compatible, skip them */
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        goto ec_clean;
    /* fill DC for inter blocks */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int dc, dcu, dcv, y, n;
            int16_t *dc_ptr;
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type) && s->partitioned_frame)
                continue;
            // if (error & ER_MV_ERROR)
            //     continue; // inter data damaged FIXME is this good?

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
            for (n = 0; n < 4; n++) {
                dc = 0;
                for (y = 0; y < 8; y++) {
                    int x;
                    for (x = 0; x < 8; x++)
                        dc += dest_y[x + (n & 1) * 8 +
                                     (y + (n >> 1) * 8) * s->linesize];
                }
                dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
            }

            dcu = dcv = 0;
            for (y = 0; y < 8; y++) {
                int x;
                for (x = 0; x < 8; x++) {
                    dcu += dest_cb[x + y * s->uvlinesize];
                    dcv += dest_cr[x + y * s->uvlinesize];
                }
            }
            s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
            s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
        }
    }
#if 1
    /* guess DC for damaged blocks */
    guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
    guess_dc(s, s->dc_val[1], s->mb_width  , s->mb_height  , s->mb_stride, 0);
    guess_dc(s, s->dc_val[2], s->mb_width  , s->mb_height  , s->mb_stride, 0);
#endif

    /* filter luma DC */
    filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);

#if 1
    /* render DC only intra */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            uint8_t *dest_y, *dest_cb, *dest_cr;
            const int mb_xy   = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];

            error = s->error_status_table[mb_xy];

            if (IS_INTER(mb_type))
                continue;
            if (!(error & ER_AC_ERROR))
                continue; // undamaged

            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;

            put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
        }
    }
#endif

    if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
        /* filter horizontal block boundaries */
        h_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
                       s->mb_height * 2, s->linesize, 1);
        h_block_filter(s, s->current_picture.f.data[1], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
        h_block_filter(s, s->current_picture.f.data[2], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);

        /* filter vertical block boundaries */
        v_block_filter(s, s->current_picture.f.data[0], s->mb_width * 2,
                       s->mb_height * 2, s->linesize, 1);
        v_block_filter(s, s->current_picture.f.data[1], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
        v_block_filter(s, s->current_picture.f.data[2], s->mb_width,
                       s->mb_height, s->uvlinesize, 0);
    }

ec_clean:
    /* clean a few tables */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        int       error = s->error_status_table[mb_xy];

        if (s->pict_type != AV_PICTURE_TYPE_B &&
            (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
            s->mbskip_table[mb_xy] = 0;
        }
        s->mbintra_table[mb_xy] = 1;
    }
}