/*
 * RV40 decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder
 */

#include "config.h"

#include "libavutil/imgutils.h"

#include "avcodec.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "golomb.h"
#include "rv34.h"
#include "rv40vlc2.h"
#include "rv40data.h"

static VLC aic_top_vlc;
static VLC aic_mode1_vlc[AIC_MODE1_NUM], aic_mode2_vlc[AIC_MODE2_NUM];
static VLC ptype_vlc[NUM_PTYPE_VLCS], btype_vlc[NUM_BTYPE_VLCS];
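
/* Start offsets of each mode-2 AIC VLC inside the shared static aic_mode2_table
 * in rv40_init_tables(); the last entry gives the total table size. */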
static const int16_t mode2_offs[] = {
       0,  614, 1222, 1794, 2410,  3014,  3586,  4202,  4792, 5382, 5966, 6542,
    7138, 7716, 8292, 8864, 9444, 10030, 10642, 11212, 11814
};

/**
 * Initialize all tables.
 */
static av_cold void rv40_init_tables(void)
{
    int i;
    static VLC_TYPE aic_table[1 << AIC_TOP_BITS][2];
    static VLC_TYPE aic_mode1_table[AIC_MODE1_NUM << AIC_MODE1_BITS][2];
    static VLC_TYPE aic_mode2_table[11814][2];
    static VLC_TYPE ptype_table[NUM_PTYPE_VLCS << PTYPE_VLC_BITS][2];
    static VLC_TYPE btype_table[NUM_BTYPE_VLCS << BTYPE_VLC_BITS][2];

    aic_top_vlc.table           = aic_table;
    aic_top_vlc.table_allocated = 1 << AIC_TOP_BITS;
    init_vlc(&aic_top_vlc, AIC_TOP_BITS, AIC_TOP_SIZE,
             rv40_aic_top_vlc_bits,  1, 1,
             rv40_aic_top_vlc_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
    for(i = 0; i < AIC_MODE1_NUM; i++){
        // Every tenth VLC table is empty
        if((i % 10) == 9) continue;
        aic_mode1_vlc[i].table           = &aic_mode1_table[i << AIC_MODE1_BITS];
        aic_mode1_vlc[i].table_allocated = 1 << AIC_MODE1_BITS;
        init_vlc(&aic_mode1_vlc[i], AIC_MODE1_BITS, AIC_MODE1_SIZE,
                 aic_mode1_vlc_bits[i],  1, 1,
                 aic_mode1_vlc_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
    }
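    /* Mode-2 VLCs return a pair of intra types per code. The source symbols pack
     * both 4-bit types into one byte; unpack them into two bytes ordered for the
     * host endianness so that AV_WN16() in rv40_decode_intra_types() can store
     * both types with a single write. */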
    for(i = 0; i < AIC_MODE2_NUM; i++){
        uint16_t syms[AIC_MODE2_SIZE];

        for (int j = 0; j < AIC_MODE2_SIZE; j++) {
            int first  = aic_mode2_vlc_syms[i][j] >> 4;
            int second = aic_mode2_vlc_syms[i][j] & 0xF;
            if (HAVE_BIGENDIAN)
                syms[j] = (first << 8) | second;
            else
                syms[j] = first | (second << 8);
        }
        aic_mode2_vlc[i].table           = &aic_mode2_table[mode2_offs[i]];
        aic_mode2_vlc[i].table_allocated = mode2_offs[i + 1] - mode2_offs[i];
        ff_init_vlc_from_lengths(&aic_mode2_vlc[i], AIC_MODE2_BITS, AIC_MODE2_SIZE,
                                 aic_mode2_vlc_bits[i], 1,
                                 syms, 2, 2, 0, INIT_VLC_USE_NEW_STATIC, NULL);
    }
    for(i = 0; i < NUM_PTYPE_VLCS; i++){
        ptype_vlc[i].table           = &ptype_table[i << PTYPE_VLC_BITS];
        ptype_vlc[i].table_allocated = 1 << PTYPE_VLC_BITS;
        ff_init_vlc_from_lengths(&ptype_vlc[i], PTYPE_VLC_BITS, PTYPE_VLC_SIZE,
                                 &ptype_vlc_tabs[i][0][1], 2,
                                 &ptype_vlc_tabs[i][0][0], 2, 1,
                                 0, INIT_VLC_USE_NEW_STATIC, NULL);
    }
    for(i = 0; i < NUM_BTYPE_VLCS; i++){
        btype_vlc[i].table           = &btype_table[i << BTYPE_VLC_BITS];
        btype_vlc[i].table_allocated = 1 << BTYPE_VLC_BITS;
        ff_init_vlc_from_lengths(&btype_vlc[i], BTYPE_VLC_BITS, BTYPE_VLC_SIZE,
                                 &btype_vlc_tabs[i][0][1], 2,
                                 &btype_vlc_tabs[i][0][0], 2, 1,
                                 0, INIT_VLC_USE_NEW_STATIC, NULL);
    }
}

/**
 * Get stored dimension from bitstream.
 *
 * If the width/height is the standard one then it's coded as a 3-bit index.
 * Otherwise it is coded as escaped 8-bit portions.
 */
static int get_dimension(GetBitContext *gb, const int *dim)
{
    int t   = get_bits(gb, 3);
    int val = dim[t];
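
    /* a negative table entry escapes to an extra table entry selected by one more bit */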
    if(val < 0)
        val = dim[get_bits1(gb) - val];
    if(!val){
        do{
            if (get_bits_left(gb) < 8)
                return AVERROR_INVALIDDATA;
            t = get_bits(gb, 8);
            val += t << 2;
        }while(t == 0xFF);
    }
    return val;
}

/**
 * Get encoded picture size - usually this is called from rv40_parse_slice_header.
 */
static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
{
    *w = get_dimension(gb, rv40_standard_widths);
    *h = get_dimension(gb, rv40_standard_heights);
}

static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
{
    int mb_bits;
    int w = r->s.width, h = r->s.height;
    int mb_size;
    int ret;

    memset(si, 0, sizeof(SliceInfo));
    if(get_bits1(gb))
        return AVERROR_INVALIDDATA;
    si->type  = get_bits(gb, 2);
    if(si->type == 1) si->type = 0;
    si->quant = get_bits(gb, 5);
    if(get_bits(gb, 2))
        return AVERROR_INVALIDDATA;
    si->vlc_set = get_bits(gb, 2);
    skip_bits1(gb);
    si->pts = get_bits(gb, 13);
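    /* picture dimensions are always coded for intra slices; for other slice types
     * a set flag means the current dimensions are kept, otherwise new ones follow */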
    if(!si->type || !get_bits1(gb))
        rv40_parse_picture_size(gb, &w, &h);
    if ((ret = av_image_check_size(w, h, 0, r->s.avctx)) < 0)
        return ret;
    si->width  = w;
    si->height = h;
    mb_size = ((w + 15) >> 4) * ((h + 15) >> 4);
    mb_bits = ff_rv34_get_start_offset(gb, mb_size);
    si->start = get_bits(gb, mb_bits);

    return 0;
}

/**
 * Decode 4x4 intra types array.
 */
static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
{
    MpegEncContext *s = &r->s;
    int i, j, k, v;
    int A, B, C;
    int pattern;
    int8_t *ptr;

    for(i = 0; i < 4; i++, dst += r->intra_types_stride){
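        /* the first row of blocks on the first slice line has no top neighbours,
         * so all four intra types are coded by one VLC as a 4-bit pattern;
         * each bit is expanded to intra type 0 or 2 */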
        if(!i && s->first_slice_line){
            pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1);
            dst[0] = (pattern >> 2) & 2;
            dst[1] = (pattern >> 1) & 2;
            dst[2] =  pattern       & 2;
            dst[3] = (pattern << 1) & 2;
            continue;
        }
        ptr = dst;
        for(j = 0; j < 4; j++){
            /* Coefficients are read using VLC chosen by the prediction pattern
             * The first one (used for retrieving a pair of coefficients) is
             * constructed from the top, top right and left coefficients
             * The second one (used for retrieving only one coefficient) is
             * top + 10 * left.
             */
            A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row
            B = ptr[-r->intra_types_stride];
            C = ptr[-1];
            pattern = A + B * (1 << 4) + C * (1 << 8);
            for(k = 0; k < MODE2_PATTERNS_NUM; k++)
                if(pattern == rv40_aic_table_index[k])
                    break;
            if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients
                AV_WN16(ptr, get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2));
                ptr += 2;
                j++;
            }else{
                if(B != -1 && C != -1)
                    v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1);
                else{ // tricky decoding
                    v = 0;
                    switch(C){
                    case -1: // code 0 -> 1, 1 -> 0
                        if(B < 2)
                            v = get_bits1(gb) ^ 1;
                        break;
                    case  0:
                    case  2: // code 0 -> 2, 1 -> 0
                        v = (get_bits1(gb) ^ 1) << 1;
                        break;
                    }
                }
                *ptr++ = v;
            }
        }
    }
    return 0;
}

/**
 * Decode macroblock information.
 */
static int rv40_decode_mb_info(RV34DecContext *r)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int q, i;
    int prev_type = 0;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if(!r->s.mb_skip_run) {
        r->s.mb_skip_run = get_interleaved_ue_golomb(gb) + 1;
        if(r->s.mb_skip_run > (unsigned)s->mb_num)
            return -1;
    }
    if(--r->s.mb_skip_run)
        return RV34_MB_SKIP;
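
    /* predict the macroblock type as the most frequent type among the available
     * decoded neighbours (left, top, top-right, top-left); the prediction only
     * selects which VLC is used below */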
    if(r->avail_cache[6-4]){
        int blocks[RV34_MB_TYPES] = {0};
        int count = 0;
        if(r->avail_cache[6-1])
            blocks[r->mb_type[mb_pos - 1]]++;
        blocks[r->mb_type[mb_pos - s->mb_stride]]++;
        if(r->avail_cache[6-2])
            blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
        if(r->avail_cache[6-5])
            blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
        for(i = 0; i < RV34_MB_TYPES; i++){
            if(blocks[i] > count){
                count = blocks[i];
                prev_type = i;
                if(count>1)
                    break;
            }
        }
    } else if (r->avail_cache[6-1])
        prev_type = r->mb_type[mb_pos - 1];

    if(s->pict_type == AV_PICTURE_TYPE_P){
        prev_type = block_num_to_ptype_vlc_num[prev_type];
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
    }else{
        prev_type = block_num_to_btype_vlc_num[prev_type];
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        if(q < PBTYPE_ESCAPE)
            return q;
        q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
        av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
    }
    return 0;
}

enum RV40BlockPos{
    POS_CUR,
    POS_TOP,
    POS_LEFT,
    POS_BOTTOM,
};
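
/* Subblock masks within the coded block patterns: the luma CBP has 16 bits, one
 * nibble per row of 4x4 subblocks with the LSB at the top-left block; each chroma
 * plane uses 4 bits laid out the same way with two subblocks per row. */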
#define MASK_CUR          0x0001
#define MASK_RIGHT        0x0008
#define MASK_BOTTOM       0x0010
#define MASK_TOP          0x1000
#define MASK_Y_TOP_ROW    0x000F
#define MASK_Y_LAST_ROW   0xF000
#define MASK_Y_LEFT_COL   0x1111
#define MASK_Y_RIGHT_COL  0x8888
#define MASK_C_TOP_ROW    0x0003
#define MASK_C_LAST_ROW   0x000C
#define MASK_C_LEFT_COL   0x0005
#define MASK_C_RIGHT_COL  0x000A

static const int neighbour_offs_x[4] = { 0,  0, -1, 0 };
static const int neighbour_offs_y[4] = { 0, -1,  0, 1 };

static void rv40_adaptive_loop_filter(RV34DSPContext *rdsp,
                                      uint8_t *src, int stride, int dmode,
                                      int lim_q1, int lim_p1,
                                      int alpha, int beta, int beta2,
                                      int chroma, int edge, int dir)
{
    int filter_p1, filter_q1;
    int strong;
    int lims;

    strong = rdsp->rv40_loop_filter_strength[dir](src, stride, beta, beta2,
                                                  edge, &filter_p1, &filter_q1);

    lims = filter_p1 + filter_q1 + ((lim_q1 + lim_p1) >> 1) + 1;
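
    /* pick between the strong filter, the weak filter applied to both sides,
     * and the one-sided weak filter with halved limits */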
    if (strong) {
        rdsp->rv40_strong_loop_filter[dir](src, stride, alpha,
                                           lims, dmode, chroma);
    } else if (filter_p1 & filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, 1, 1, alpha, beta,
                                         lims, lim_q1, lim_p1);
    } else if (filter_p1 | filter_q1) {
        rdsp->rv40_weak_loop_filter[dir](src, stride, filter_p1, filter_q1,
                                         alpha, beta, lims >> 1, lim_q1 >> 1,
                                         lim_p1 >> 1);
    }
}

/**
 * RV40 loop filtering function
 */
static void rv40_loop_filter(RV34DecContext *r, int row)
{
    MpegEncContext *s = &r->s;
    int mb_pos, mb_x;
    int i, j, k;
    uint8_t *Y, *C;
    int alpha, beta, betaY, betaC;
    int q;
    int mbtype[4];   ///< current macroblock and its neighbours types
    /**
     * flags indicating that macroblock can be filtered with strong filter
     * it is set only for intra coded MB and MB with DCs coded separately
     */
    int mb_strong[4];
    int clip[4];     ///< MB filter clipping value calculated from filtering strength
    /**
     * coded block patterns for luma part of current macroblock and its neighbours
     * Format:
     * LSB corresponds to the top left block,
     * each nibble represents one row of subblocks.
     */
    int cbp[4];
    /**
     * coded block patterns for chroma part of current macroblock and its neighbours
     * Format is the same as for luma with two subblocks in a row.
     */
    int uvcbp[4][2];
    /**
     * This mask represents the pattern of luma subblocks that should be filtered
     * in addition to the coded ones because they lie at the edge of
     * 8x8 block with different enough motion vectors
     */
    unsigned mvmasks[4];

    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int mbtype = s->current_picture_ptr->mb_type[mb_pos];
        if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
            r->cbp_luma  [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
        if(IS_INTRA(mbtype))
            r->cbp_chroma[mb_pos] = 0xFF;
    }
    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
        int y_h_deblock, y_v_deblock;
        int c_v_deblock[2], c_h_deblock[2];
        int clip_left;
        int avail[4];
        unsigned y_to_deblock;
        int c_to_deblock[2];

        q = s->current_picture_ptr->qscale_table[mb_pos];
        alpha = rv40_alpha_tab[q];
        beta  = rv40_beta_tab [q];
        betaY = betaC = beta * 3;
        if(s->width * s->height <= 176*144)
            betaY += beta;
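
        /* neighbour availability in POS_CUR, POS_TOP, POS_LEFT, POS_BOTTOM order */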
        avail[0] = 1;
        avail[1] = row;
        avail[2] = mb_x;
        avail[3] = row < s->mb_height - 1;
        for(i = 0; i < 4; i++){
            if(avail[i]){
                int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
                mvmasks[i] = r->deblock_coefs[pos];
                mbtype [i] = s->current_picture_ptr->mb_type[pos];
                cbp    [i] = r->cbp_luma[pos];
                uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
                uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
            }else{
                mvmasks[i] = 0;
                mbtype [i] = mbtype[0];
                cbp    [i] = 0;
                uvcbp[i][0] = uvcbp[i][1] = 0;
            }
            mb_strong[i] = IS_INTRA(mbtype[i]) || IS_SEPARATE_DC(mbtype[i]);
            clip[i] = rv40_filter_clip_tbl[mb_strong[i] + 1][q];
        }
        y_to_deblock =  mvmasks[POS_CUR]
                     | (mvmasks[POS_BOTTOM] << 16);
        /* This pattern contains bits signalling that horizontal edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_h_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                    <<  4) & ~MASK_Y_TOP_ROW)
                    | ((cbp[POS_TOP] & MASK_Y_LAST_ROW) >> 12);
        /* This pattern contains bits signalling that vertical edges of
         * the current block can be filtered.
         * That happens when either of adjacent subblocks is coded or lies on
         * the edge of 8x8 blocks with motion vectors differing by more than
         * 3/4 pel in any component (any edge orientation for some reason).
         */
        y_v_deblock =   y_to_deblock
                    | ((cbp[POS_CUR]                      << 1) & ~MASK_Y_LEFT_COL)
                    | ((cbp[POS_LEFT] & MASK_Y_RIGHT_COL) >> 3);
        if(!mb_x)
            y_v_deblock &= ~MASK_Y_LEFT_COL;
        if(!row)
            y_h_deblock &= ~MASK_Y_TOP_ROW;
        if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
            y_h_deblock &= ~(MASK_Y_TOP_ROW << 16);
        /* Calculating chroma patterns is similar and easier since there is
         * no motion vector pattern for them.
         */
        for(i = 0; i < 2; i++){
            c_to_deblock[i] = (uvcbp[POS_BOTTOM][i] << 4) | uvcbp[POS_CUR][i];
            c_v_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_CUR] [i] << 1) & ~MASK_C_LEFT_COL)
                           | ((uvcbp[POS_LEFT][i] & MASK_C_RIGHT_COL) >> 1);
            c_h_deblock[i] =   c_to_deblock[i]
                           | ((uvcbp[POS_TOP][i]  & MASK_C_LAST_ROW) >> 2)
                           |  (uvcbp[POS_CUR][i]                     << 2);
            if(!mb_x)
                c_v_deblock[i] &= ~MASK_C_LEFT_COL;
            if(!row)
                c_h_deblock[i] &= ~MASK_C_TOP_ROW;
            if(row == s->mb_height - 1 || (mb_strong[POS_CUR] | mb_strong[POS_BOTTOM]))
                c_h_deblock[i] &= ~(MASK_C_TOP_ROW << 4);
        }

        for(j = 0; j < 16; j += 4){
            Y = s->current_picture_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
            for(i = 0; i < 4; i++, Y += 4){
                int ij = i + j;
                int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                int dither = j ? ij : i*4;

                // if bottom block is coded then we can filter its top edge
                // (or bottom edge of this block, which is the same)
                if(y_h_deblock & (MASK_BOTTOM << ij)){
                    rv40_adaptive_loop_filter(&r->rdsp, Y+4*s->linesize,
                                              s->linesize, dither,
                                              y_to_deblock & (MASK_BOTTOM << ij) ? clip[POS_CUR] : 0,
                                              clip_cur, alpha, beta, betaY,
                                              0, 0, 0);
                }
                // filter left block edge in ordinary mode (with low filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                    if(!i)
                        clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    else
                        clip_left = y_to_deblock & (MASK_CUR << (ij-1)) ? clip[POS_CUR] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 0, 1);
                }
                // filter top edge of the current macroblock when filtering strength is high
                if(!j && y_h_deblock & (MASK_CUR << i) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              mvmasks[POS_TOP] & (MASK_TOP << i) ? clip[POS_TOP] : 0,
                                              alpha, beta, betaY, 0, 1, 0);
                }
                // filter left block edge in edge mode (with high filtering strength)
                if(y_v_deblock & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                    clip_left = mvmasks[POS_LEFT] & (MASK_RIGHT << j) ? clip[POS_LEFT] : 0;
                    rv40_adaptive_loop_filter(&r->rdsp, Y, s->linesize, dither,
                                              clip_cur,
                                              clip_left,
                                              alpha, beta, betaY, 0, 1, 1);
                }
            }
        }
        for(k = 0; k < 2; k++){
            for(j = 0; j < 2; j++){
                C = s->current_picture_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
                for(i = 0; i < 2; i++, C += 4){
                    int ij = i + j*2;
                    int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
                    if(c_h_deblock[k] & (MASK_CUR << (ij+2))){
                        int clip_bot = c_to_deblock[k] & (MASK_CUR << (ij+2)) ? clip[POS_CUR] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C+4*s->uvlinesize, s->uvlinesize, i*8,
                                                  clip_bot,
                                                  clip_cur,
                                                  alpha, beta, betaC, 1, 0, 0);
                    }
                    if((c_v_deblock[k] & (MASK_CUR << ij)) && (i || !(mb_strong[POS_CUR] | mb_strong[POS_LEFT]))){
                        if(!i)
                            clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        else
                            clip_left = c_to_deblock[k]    & (MASK_CUR << (ij-1))  ? clip[POS_CUR]  : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 0, 1);
                    }
                    if(!j && c_h_deblock[k] & (MASK_CUR << ij) && (mb_strong[POS_CUR] | mb_strong[POS_TOP])){
                        int clip_top = uvcbp[POS_TOP][k] & (MASK_CUR << (ij+2)) ? clip[POS_TOP] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, i*8,
                                                  clip_cur,
                                                  clip_top,
                                                  alpha, beta, betaC, 1, 1, 0);
                    }
                    if(c_v_deblock[k] & (MASK_CUR << ij) && !i && (mb_strong[POS_CUR] | mb_strong[POS_LEFT])){
                        clip_left = uvcbp[POS_LEFT][k] & (MASK_CUR << (2*j+1)) ? clip[POS_LEFT] : 0;
                        rv40_adaptive_loop_filter(&r->rdsp, C, s->uvlinesize, j*8,
                                                  clip_cur,
                                                  clip_left,
                                                  alpha, beta, betaC, 1, 1, 1);
                    }
                }
            }
        }
    }
}

/**
 * Initialize decoder.
 */
static av_cold int rv40_decode_init(AVCodecContext *avctx)
{
    RV34DecContext *r = avctx->priv_data;
    int ret;

    r->rv30 = 0;
    if ((ret = ff_rv34_decode_init(avctx)) < 0)
        return ret;
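    /* the static VLC tables are shared by every decoder instance and built only once */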
    if(!aic_top_vlc.bits)
        rv40_init_tables();
    r->parse_slice_header = rv40_parse_slice_header;
    r->decode_intra_types = rv40_decode_intra_types;
    r->decode_mb_info     = rv40_decode_mb_info;
    r->loop_filter        = rv40_loop_filter;
    r->luma_dc_quant_i = rv40_luma_dc_quant[0];
    r->luma_dc_quant_p = rv40_luma_dc_quant[1];
    return 0;
}

AVCodec ff_rv40_decoder = {
    .name                  = "rv40",
    .long_name             = NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_RV40,
    .priv_data_size        = sizeof(RV34DecContext),
    .init                  = rv40_decode_init,
    .close                 = ff_rv34_decode_end,
    .decode                = ff_rv34_decode_frame,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
                             AV_CODEC_CAP_FRAME_THREADS,
    .flush                 = ff_mpeg_flush,
    .pix_fmts              = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_NONE
    },
    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_rv34_decode_update_thread_context),
    .caps_internal         = FF_CODEC_CAP_ALLOCATE_PROGRESS,
};