/*
 * RV40 decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder
 */

#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "golomb.h"

#include "rv34.h"
#include "rv40vlc2.h"
#include "rv40data.h"

static VLC aic_top_vlc;
static VLC aic_mode1_vlc[AIC_MODE1_NUM], aic_mode2_vlc[AIC_MODE2_NUM];
static VLC ptype_vlc[NUM_PTYPE_VLCS], btype_vlc[NUM_BTYPE_VLCS];

static const int16_t mode2_offs[] = {
       0,  614, 1222, 1794, 2410, 3014, 3586, 4202, 4792, 5382, 5966, 6542,
    7138, 7716, 8292, 8864, 9444, 10030, 10642, 11212, 11814
};
/**
 * Initialize all tables.
 */
static av_cold void rv40_init_tables(void)
{
    int i;
    static VLC_TYPE aic_table[1 << AIC_TOP_BITS][2];
    static VLC_TYPE aic_mode1_table[AIC_MODE1_NUM << AIC_MODE1_BITS][2];
    static VLC_TYPE aic_mode2_table[11814][2];
    static VLC_TYPE ptype_table[NUM_PTYPE_VLCS << PTYPE_VLC_BITS][2];
    static VLC_TYPE btype_table[NUM_BTYPE_VLCS << BTYPE_VLC_BITS][2];

    aic_top_vlc.table = aic_table;
    aic_top_vlc.table_allocated = 1 << AIC_TOP_BITS;
    init_vlc(&aic_top_vlc, AIC_TOP_BITS, AIC_TOP_SIZE,
             rv40_aic_top_vlc_bits,  1, 1,
             rv40_aic_top_vlc_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
    for(i = 0; i < AIC_MODE1_NUM; i++){
        // Every tenth VLC table is empty
        if((i % 10) == 9) continue;
        aic_mode1_vlc[i].table = &aic_mode1_table[i << AIC_MODE1_BITS];
        aic_mode1_vlc[i].table_allocated = 1 << AIC_MODE1_BITS;
        init_vlc(&aic_mode1_vlc[i], AIC_MODE1_BITS, AIC_MODE1_SIZE,
                 aic_mode1_vlc_bits[i],  1, 1,
                 aic_mode1_vlc_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
    }
    for(i = 0; i < AIC_MODE2_NUM; i++){
        aic_mode2_vlc[i].table = &aic_mode2_table[mode2_offs[i]];
        aic_mode2_vlc[i].table_allocated = mode2_offs[i + 1] - mode2_offs[i];
        init_vlc(&aic_mode2_vlc[i], AIC_MODE2_BITS, AIC_MODE2_SIZE,
                 aic_mode2_vlc_bits[i],  1, 1,
                 aic_mode2_vlc_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
    }
    for(i = 0; i < NUM_PTYPE_VLCS; i++){
        ptype_vlc[i].table = &ptype_table[i << PTYPE_VLC_BITS];
        ptype_vlc[i].table_allocated = 1 << PTYPE_VLC_BITS;
        init_vlc_sparse(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
                        ptype_vlc_bits[i],  1, 1,
                        ptype_vlc_codes[i], 1, 1,
                        ptype_vlc_syms,     1, 1, INIT_VLC_USE_NEW_STATIC);
    }
    for(i = 0; i < NUM_BTYPE_VLCS; i++){
        btype_vlc[i].table = &btype_table[i << BTYPE_VLC_BITS];
        btype_vlc[i].table_allocated = 1 << BTYPE_VLC_BITS;
        init_vlc_sparse(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
                        btype_vlc_bits[i],  1, 1,
                        btype_vlc_codes[i], 1, 1,
                        btype_vlc_syms,     1, 1, INIT_VLC_USE_NEW_STATIC);
    }
}
/**
 * Get stored dimension from bitstream.
 *
 * If the width/height is the standard one then it's coded as a 3-bit index.
 * Otherwise it is coded as escaped 8-bit portions.
 */
static int get_dimension(GetBitContext *gb, const int *dim)
{
    int t   = get_bits(gb, 3);
    int val = dim[t];
    if(val < 0)
        val = dim[get_bits1(gb) - val];
    if(!val){
        do{
            t = get_bits(gb, 8);
            val += t << 2;
        }while(t == 0xFF);
    }
    return val;
}
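
/* Worked example of the escape coding above (derived from the loop): the
 * dimension is transmitted in units of 4 pixels, one byte at a time, with
 * 0xFF meaning "add 255*4 and keep reading".  A width of 1000 is the single
 * byte 250 (250 << 2 == 1000), while a width of 1024 is 0xFF followed by 0x01
 * ((255 + 1) << 2 == 1024). */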

/**
 * Get encoded picture size - usually this is called from rv40_parse_slice_header.
 */
static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
{
    *w = get_dimension(gb, rv40_standard_widths);
    *h = get_dimension(gb, rv40_standard_heights);
}

static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
{
    int mb_bits;
    int w = r->s.width, h = r->s.height;
    int mb_size;

    memset(si, 0, sizeof(SliceInfo));
    if(get_bits1(gb))
        return -1;
    si->type  = get_bits(gb, 2);
    if(si->type == 1) si->type = 0;
    si->quant = get_bits(gb, 5);
    if(get_bits(gb, 2))
        return -1;
    si->vlc_set = get_bits(gb, 2);
    skip_bits1(gb);
    si->pts = get_bits(gb, 13);
    if(!si->type || !get_bits1(gb))
        rv40_parse_picture_size(gb, &w, &h);
    if(av_image_check_size(w, h, 0, r->s.avctx) < 0)
        return -1;
    si->width  = w;
    si->height = h;
    mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
    mb_bits = ff_rv34_get_start_offset(gb, mb_size);
    si->start = get_bits(gb, mb_bits);

    return 0;
}
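
/* Slice header layout as parsed above (bit counts in parentheses): a marker
 * that must be 0 (1), picture type (2), quantizer (5), two reserved bits that
 * must be 0 (2), VLC set index (2), one skipped bit (1), PTS (13), then the
 * picture size for intra slices or when the "same size" flag bit is 0, and
 * finally the starting macroblock offset. */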

/**
 * Decode 4x4 intra types array.
 */
static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
{
    MpegEncContext *s = &r->s;
    int i, j, k, v;
    int A, B, C;
    int pattern;
    int8_t *ptr;

    for(i = 0; i < 4; i++, dst += r->intra_types_stride){
        if(!i && s->first_slice_line){
            pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1);
            dst[0] = (pattern >> 2) & 2;
            dst[1] = (pattern >> 1) & 2;
            dst[2] =  pattern       & 2;
            dst[3] = (pattern << 1) & 2;
            continue;
        }
        ptr = dst;
        for(j = 0; j < 4; j++){
            /* Coefficients are read using VLC chosen by the prediction pattern
             * The first one (used for retrieving a pair of coefficients) is
             * constructed from the top, top right and left coefficients
             * The second one (used for retrieving only one coefficient) is
             * top + 10 * left.
             */
            A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row
            B = ptr[-r->intra_types_stride];
            C = ptr[-1];
            pattern = A + (B << 4) + (C << 8);
            for(k = 0; k < MODE2_PATTERNS_NUM; k++)
                if(pattern == rv40_aic_table_index[k])
                    break;
            if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients
                v = get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2);
                *ptr++ = v/9;
                *ptr++ = v%9;
                j++;
            }else{
                if(B != -1 && C != -1)
                    v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1);
                else{ // tricky decoding
                    v = 0;
                    switch(C){
                    case -1: // code 0 -> 1, 1 -> 0
                        if(B < 2)
                            v = get_bits1(gb) ^ 1;
                        break;
                    case  0:
                    case  2: // code 0 -> 2, 1 -> 0
                        v = (get_bits1(gb) ^ 1) << 1;
                        break;
                    }
                }
                *ptr++ = v;
            }
        }
    }
    return 0;
}
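
/* Note on the mode2 path above: a single VLC symbol carries a pair of intra
 * prediction modes packed in base 9 (nine possible 4x4 modes), so a decoded
 * value of, say, 13 yields the two modes 13/9 == 1 and 13%9 == 4. */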

/**
 * Decode macroblock information.
 */
static int rv40_decode_mb_info(RV34DecContext *r)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int q, i;
    int prev_type = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int blocks[RV34_MB_TYPES] = {0};
    int count = 0;

    if(!r->s.mb_skip_run) {
        r->s.mb_skip_run = svq3_get_ue_golomb(gb) + 1;
        if(r->s.mb_skip_run > (unsigned)s->mb_num)
            return -1;
    }
    if(--r->s.mb_skip_run)
        return RV34_MB_SKIP;

    if(r->avail_cache[6-1])
        blocks[r->mb_type[mb_pos - 1]]++;
    if(r->avail_cache[6-4]){
        blocks[r->mb_type[mb_pos - s->mb_stride]]++;
        if(r->avail_cache[6-2])
            blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
        if(r->avail_cache[6-5])
            blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
    }

    for(i = 0; i < RV34_MB_TYPES; i++){
        if(blocks[i] > count){
            count = blocks[i];
            prev_type = i;
        }
    }
    if(s->pict_type == AV_PICTURE_TYPE_P){
        prev_type = block_num_to_ptype_vlc_num[prev_type];
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
    }else{
        prev_type = block_num_to_btype_vlc_num[prev_type];
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
    }
    return 0;
}
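
/* How the macroblock type is chosen above: the skip run is read as an
 * Exp-Golomb number counting the macroblocks to skip before the next coded
 * one (a value of 0 means the current macroblock is coded).  For a coded
 * macroblock, the type VLC is selected by the most frequent type among the
 * already decoded left, top, top-left and top-right neighbours. */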

#define CLIP_SYMM(a, b) av_clip(a, -(b), b)
/**
 * weaker deblocking very similar to the one described in 4.4.2 of JVT-A003r1
 */
static inline void rv40_weak_loop_filter(uint8_t *src, const int step,
                                         const int filter_p1, const int filter_q1,
                                         const int alpha, const int beta,
                                         const int lim_p0q0,
                                         const int lim_q1, const int lim_p1,
                                         const int diff_p1p0, const int diff_q1q0,
                                         const int diff_p1p2, const int diff_q1q2)
{
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int t, u, diff;

    t = src[0*step] - src[-1*step];
    if(!t)
        return;
    u = (alpha * FFABS(t)) >> 7;
    if(u > 3 - (filter_p1 && filter_q1))
        return;

    t <<= 2;
    if(filter_p1 && filter_q1)
        t += src[-2*step] - src[1*step];
    diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0);
    src[-1*step] = cm[src[-1*step] + diff];
    src[ 0*step] = cm[src[ 0*step] - diff];
    if(FFABS(diff_p1p2) <= beta && filter_p1){
        t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
        src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
    }
    if(FFABS(diff_q1q2) <= beta && filter_q1){
        t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
        src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
    }
}

static av_always_inline void rv40_adaptive_loop_filter(uint8_t *src, const int step,
                                                       const int stride, const int dmode,
                                                       const int lim_q1, const int lim_p1,
                                                       const int alpha,
                                                       const int beta, const int beta2,
                                                       const int chroma, const int edge)
{
    int diff_p1p0[4], diff_q1q0[4], diff_p1p2[4], diff_q1q2[4];
    int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
    uint8_t *ptr;
    int flag_strong0 = 1, flag_strong1 = 1;
    int filter_p1, filter_q1;
    int i;
    int lims;

    for(i = 0, ptr = src; i < 4; i++, ptr += stride){
        diff_p1p0[i] = ptr[-2*step] - ptr[-1*step];
        diff_q1q0[i] = ptr[ 1*step] - ptr[ 0*step];
        sum_p1p0 += diff_p1p0[i];
        sum_q1q0 += diff_q1q0[i];
    }
    filter_p1 = FFABS(sum_p1p0) < (beta<<2);
    filter_q1 = FFABS(sum_q1q0) < (beta<<2);
    if(!filter_p1 && !filter_q1)
        return;

    for(i = 0, ptr = src; i < 4; i++, ptr += stride){
        diff_p1p2[i] = ptr[-2*step] - ptr[-3*step];
        diff_q1q2[i] = ptr[ 1*step] - ptr[ 2*step];
        sum_p1p2 += diff_p1p2[i];
        sum_q1q2 += diff_q1q2[i];
    }

    if(edge){
        flag_strong0 = filter_p1 && (FFABS(sum_p1p2) < beta2);
        flag_strong1 = filter_q1 && (FFABS(sum_q1q2) < beta2);
    }else{
        flag_strong0 = flag_strong1 = 0;
    }
    lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1;
    if(flag_strong0 && flag_strong1){ /* strong filtering */
        for(i = 0; i < 4; i++, src += stride){
            int sflag, p0, q0, p1, q1;
            int t = src[0*step] - src[-1*step];

            if(!t) continue;
            sflag = (alpha * FFABS(t)) >> 7;
            if(sflag > 1) continue;

            p0 = (25*src[-3*step] + 26*src[-2*step]
                  + 26*src[-1*step]
                  + 26*src[ 0*step] + 25*src[ 1*step] + rv40_dither_l[dmode + i]) >> 7;
            q0 = (25*src[-2*step] + 26*src[-1*step]
                  + 26*src[ 0*step]
                  + 26*src[ 1*step] + 25*src[ 2*step] + rv40_dither_r[dmode + i]) >> 7;
            if(sflag){
                p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
                q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
            }
            p1 = (25*src[-4*step] + 26*src[-3*step]
                  + 26*src[-2*step]
                  + 26*p0 + 25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
            q1 = (25*src[-1*step] + 26*q0
                  + 26*src[ 1*step]
                  + 26*src[ 2*step] + 25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;
            if(sflag){
                p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
                q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
            }
            src[-2*step] = p1;
            src[-1*step] = p0;
            src[ 0*step] = q0;
            src[ 1*step] = q1;
            if(!chroma){
                src[-3*step] = (25*src[-1*step] + 26*src[-2*step] + 51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
                src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] + 51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
            }
        }
    }else if(filter_p1 && filter_q1){
        for(i = 0; i < 4; i++, src += stride)
            rv40_weak_loop_filter(src, step, 1, 1, alpha, beta, lims, lim_q1, lim_p1,
                                  diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]);
    }else{
        for(i = 0; i < 4; i++, src += stride)
            rv40_weak_loop_filter(src, step, filter_p1, filter_q1,
                                  alpha, beta, lims>>1, lim_q1>>1, lim_p1>>1,
                                  diff_p1p0[i], diff_q1q0[i], diff_p1p2[i], diff_q1q2[i]);
    }
}

static void rv40_v_loop_filter(uint8_t *src, int stride, int dmode,
                               int lim_q1, int lim_p1,
                               int alpha, int beta, int beta2, int chroma, int edge){
    rv40_adaptive_loop_filter(src, 1, stride, dmode, lim_q1, lim_p1,
                              alpha, beta, beta2, chroma, edge);
}

static void rv40_h_loop_filter(uint8_t *src, int stride, int dmode,
                               int lim_q1, int lim_p1,
                               int alpha, int beta, int beta2, int chroma, int edge){
    rv40_adaptive_loop_filter(src, stride, 1, dmode, lim_q1, lim_p1,
                              alpha, beta, beta2, chroma, edge);
}

enum RV40BlockPos{
    POS_CUR,
    POS_TOP,
    POS_LEFT,
    POS_BOTTOM,
};

#define MASK_CUR          0x0001
#define MASK_RIGHT        0x0008
#define MASK_BOTTOM       0x0010
#define MASK_TOP          0x1000
#define MASK_Y_TOP_ROW    0x000F
#define MASK_Y_LAST_ROW   0xF000
#define MASK_Y_LEFT_COL   0x1111
#define MASK_Y_RIGHT_COL  0x8888
#define MASK_C_TOP_ROW    0x0003
#define MASK_C_LAST_ROW   0x000C
#define MASK_C_LEFT_COL   0x0005
#define MASK_C_RIGHT_COL  0x000A
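
/* Layout implied by the masks above: for luma, bit (x + 4*y) of a 16-bit
 * pattern corresponds to the 4x4 subblock in column x, row y of the
 * macroblock, so each nibble is one row (MASK_Y_TOP_ROW covers row 0,
 * MASK_Y_LEFT_COL covers column 0).  Chroma uses the same scheme with two
 * subblocks per row, i.e. bit (x + 2*y) of a 4-bit pattern. */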

static const int neighbour_offs_x[4] = { 0,  0, -1, 0 };
static const int neighbour_offs_y[4] = { 0, -1,  0, 1 };

/**
 * RV40 loop filtering function
 */
static void rv40_loop_filter(RV34DecContext *r, int row)
{
    MpegEncContext *s = &r->s;
    int mb_pos, mb_x;
    int i, j, k;
    uint8_t *Y, *C;
    int alpha, beta, betaY, betaC;
    int q;
    int mbtype[4];   ///< types of the current macroblock and its neighbours
    /**
     * flags indicating that the macroblock can be filtered with the strong filter;
     * set only for intra-coded MBs and MBs with DCs coded separately
     */
    int mb_strong[4];
    int clip[4];     ///< MB filter clipping value calculated from filtering strength
    /**
     * coded block patterns for the luma part of the current macroblock and its neighbours
     * Format:
     * LSB corresponds to the top left block,
     * each nibble represents one row of subblocks.
     */
    int cbp[4];
    /**
     * coded block patterns for the chroma part of the current macroblock and its neighbours
     * Format is the same as for luma with two subblocks in a row.
     */
    int uvcbp[4][2];
    /**
     * This mask represents the pattern of luma subblocks that should be filtered
     * in addition to the coded ones because they lie at the edge of an
     * 8x8 block with different enough motion vectors
     */
    int mvmasks[4];

    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
        if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
            r->cbp_luma  [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
        if(IS_INTRA(mbtype))
            r->cbp_chroma[mb_pos] = 0xFF;
    }
    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int y_h_deblock, y_v_deblock;
        int c_v_deblock[2], c_h_deblock[2];
        int clip_left;
        int avail[4];
        int y_to_deblock, c_to_deblock[2];

        q = s->current_picture_ptr->f.qscale_table[mb_pos];
        alpha = rv40_alpha_tab[q];
        beta  = rv40_beta_tab [q];
        betaY = betaC = beta * 3;
        if(s->width * s->height <= 176*144)
            betaY += beta;

        avail[0] = 1;
        avail[1] = row;
        avail[2] = mb_x;
        avail[3] = row < s->mb_height - 1;
        for(i = 0; i < 4; i++){
            if(avail[i]){
                int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
                mvmasks[i] = r->deblock_coefs[pos];
                mbtype [i] = s->current_picture_ptr->f.mb_type[pos];
                cbp    [i] = r->cbp_luma[pos];
                uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
                uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
            }else{
                mvmasks[i] = 0;
                mbtype [i] = mbtype[0];
                cbp    [i] = 0;
                uvcbp[i][0] = uvcbp[i][1] = 0;
            }
            mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]);
            clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q];
        }
        y_to_deblock =  mvmasks[POS_CUR]
                     | (mvmasks[POS_BOTTOM] << 16);
        /* This pattern contains bits signalling that horizontal edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_h_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                    <<  4) & ~MASK_Y_TOP_ROW)
                    | ((cbp[POS_TOP] & MASK_Y_LAST_ROW) >> 12);
        /* This pattern contains bits signalling that vertical edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_v_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                      << 1) & ~MASK_Y_LEFT_COL)
                    | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3);
        if(!mb_x)
            y_v_deblock &= ~MASK_Y_LEFT_COL;
        if(!row)
            y_h_deblock &= ~MASK_Y_TOP_ROW;
        if(row == s->mb_height - 1 || (mb_strong[POS_CUR] || mb_strong[POS_BOTTOM]))
            y_h_deblock &= ~(MASK_Y_TOP_ROW << 16);
        /* Calculating chroma patterns is similar and easier since there is
         * no motion vector pattern for them.
         */
        for(i = 0; i < 2; i++){
            c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i];
            c_v_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL)
                           | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1);
            c_h_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_TOP][i] & MASK_C_LAST_ROW) >> 2)
                           |  (uvcbp[POS_CUR][i] << 2);
            if(!mb_x)
                c_v_deblock[i] &= ~MASK_C_LEFT_COL;
            if(!row)
                c_h_deblock[i] &= ~MASK_C_TOP_ROW;
            if(row == s->mb_height - 1 || mb_strong[POS_CUR] || mb_strong[POS_BOTTOM])
                c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4);
        }

        for(j = 0; j < 16; j += 4){
            Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
            for(i = 0; i < 4; i++, Y += 4){
                int ij = i + j;
                int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                int dither = j ? ij : i*4;

                // if bottom block is coded then we can filter its top edge
                // (or bottom edge of this block, which is the same)
                if(y_h_deblock & (MASK_BOTTOM << ij)){
                    rv40_h_loop_filter(Y+4*s->linesize, s->linesize, dither,
                                       y_to_deblock & (MASK_BOTTOM << ij) ? clip[POS_CUR] : 0,
                                       clip_cur,
                                       alpha, beta, betaY, 0, 0);
                }
                // filter left block edge in ordinary mode (with low filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){
                    if(!i)
                        clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    else
                        clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
                    rv40_v_loop_filter(Y, s->linesize, dither,
                                       clip_cur,
                                       clip_left,
                                       alpha, beta, betaY, 0, 0);
                }
                // filter top edge of the current macroblock when filtering strength is high
                if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){
                    rv40_h_loop_filter(Y, s->linesize, dither,
                                       clip_cur,
                                       mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0,
                                       alpha, beta, betaY, 0, 1);
                }
                // filter left block edge in edge mode (with high filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){
                    clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    rv40_v_loop_filter(Y, s->linesize, dither,
                                       clip_cur,
                                       clip_left,
                                       alpha, beta, betaY, 0, 1);
                }
            }
        }
        for(k = 0; k < 2; k++){
            for(j = 0; j < 2; j++){
                C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
                for(i = 0; i < 2; i++, C += 4){
                    int ij = i + j*2;
                    int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                    if(c_h_deblock[k] & (MASK_CUR << (ij+2))){
                        int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0;
                        rv40_h_loop_filter(C+4*s->uvlinesize, s->uvlinesize, i*8,
                                           clip_bot,
                                           clip_cur,
                                           alpha, beta, betaC, 1, 0);
                    }
                    if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] || mb_strong[POS_LEFT]))){
                        if(!i)
                            clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        else
                            clip_left = c_to_deblock[k]    & (MASK_CUR << (ij-1))  ? clip[POS_CUR]  : 0;
                        rv40_v_loop_filter(C, s->uvlinesize, j*8,
                                           clip_cur,
                                           clip_left,
                                           alpha, beta, betaC, 1, 0);
                    }
                    if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] || mb_strong[POS_TOP])){
                        int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0;
                        rv40_h_loop_filter(C, s->uvlinesize, i*8,
                                           clip_cur,
                                           clip_top,
                                           alpha, beta, betaC, 1, 1);
                    }
                    if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] || mb_strong[POS_LEFT])){
                        clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        rv40_v_loop_filter(C, s->uvlinesize, j*8,
                                           clip_cur,
                                           clip_left,
                                           alpha, beta, betaC, 1, 1);
                    }
                }
            }
        }
    }
}

/**
 * Initialize decoder.
 */
static av_cold int rv40_decode_init(AVCodecContext *avctx)
{
    RV34DecContext *r = avctx->priv_data;
    r->rv30 = 0;
    ff_rv34_decode_init(avctx);
    if(!aic_top_vlc.bits)
        rv40_init_tables();
    r->parse_slice_header = rv40_parse_slice_header;
    r->decode_intra_types = rv40_decode_intra_types;
    r->decode_mb_info     = rv40_decode_mb_info;
    r->loop_filter        = rv40_loop_filter;
    r->luma_dc_quant_i = rv40_luma_dc_quant[0];
    r->luma_dc_quant_p = rv40_luma_dc_quant[1];
    return 0;
}

AVCodec ff_rv40_decoder = {
    .name           = "rv40",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_RV40,
    .priv_data_size = sizeof(RV34DecContext),
    .init           = rv40_decode_init,
    .close          = ff_rv34_decode_end,
    .decode         = ff_rv34_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .flush          = ff_mpeg_flush,
    .long_name      = NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
    .pix_fmts       = ff_pixfmt_list_420,
};