/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "thread.h"
#include "videodsp.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dsp.h"
#include "libavutil/avassert.h"
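
/* 24-bit sync code that must follow the frame marker at the start of
 * keyframe and intra-only frame headers (checked in decode_frame_header()) */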
#define VP9_SYNCCODE 0x498342

enum CompPredMode {
    PRED_SINGLEREF,
    PRED_COMPREF,
    PRED_SWITCHABLE,
};

enum BlockLevel {
    BL_64X64,
    BL_32X32,
    BL_16X16,
    BL_8X8,
};

enum BlockSize {
    BS_64x64,
    BS_64x32,
    BS_32x64,
    BS_32x32,
    BS_32x16,
    BS_16x32,
    BS_16x16,
    BS_16x8,
    BS_8x16,
    BS_8x8,
    BS_8x4,
    BS_4x8,
    BS_4x4,
    N_BS_SIZES,
};

struct VP9mvrefPair {
    VP56mv mv[2];
    int8_t ref[2];
};

typedef struct VP9Frame {
    ThreadFrame tf;
    AVBufferRef *extradata;
    uint8_t *segmentation_map;
    struct VP9mvrefPair *mv;
    int uses_2pass;
} VP9Frame;

struct VP9Filter {
    uint8_t level[8 * 8];
    uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */]
                              [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */];
};

typedef struct VP9Block {
    uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
    enum FilterMode filter;
    VP56mv mv[4 /* b_idx */][2 /* ref */];
    enum BlockSize bs;
    enum TxfmMode tx, uvtx;
    enum BlockLevel bl;
    enum BlockPartition bp;
} VP9Block;

typedef struct VP9Context {
    VP9DSPContext dsp;
    VideoDSPContext vdsp;
    GetBitContext gb;
    VP56RangeCoder c;
    VP56RangeCoder *c_b;
    unsigned c_b_size;
    VP9Block *b_base, *b;
    int pass;
    int row, row7, col, col7;
    uint8_t *dst[3];
    ptrdiff_t y_stride, uv_stride;

    // bitstream header
    uint8_t profile;
    uint8_t keyframe, last_keyframe;
    uint8_t invisible;
    uint8_t use_last_frame_mvs;
    uint8_t errorres;
    uint8_t colorspace;
    uint8_t fullrange;
    uint8_t intraonly;
    uint8_t resetctx;
    uint8_t refreshrefmask;
    uint8_t highprecisionmvs;
    enum FilterMode filtermode;
    uint8_t allowcompinter;
    uint8_t fixcompref;
    uint8_t refreshctx;
    uint8_t parallelmode;
    uint8_t framectxid;
    uint8_t refidx[3];
    uint8_t signbias[3];
    uint8_t varcompref[2];
    ThreadFrame refs[8], next_refs[8];
#define CUR_FRAME 0
#define REF_FRAME_MVPAIR 1
#define REF_FRAME_SEGMAP 2
    VP9Frame frames[3];

    struct {
        uint8_t level;
        int8_t sharpness;
        uint8_t lim_lut[64];
        uint8_t mblim_lut[64];
    } filter;
    struct {
        uint8_t enabled;
        int8_t mode[2];
        int8_t ref[4];
    } lf_delta;
    uint8_t yac_qi;
    int8_t ydc_qdelta, uvdc_qdelta, uvac_qdelta;
    uint8_t lossless;
#define MAX_SEGMENT 8
    struct {
        uint8_t enabled;
        uint8_t temporal;
        uint8_t absolute_vals;
        uint8_t update_map;
        struct {
            uint8_t q_enabled;
            uint8_t lf_enabled;
            uint8_t ref_enabled;
            uint8_t skip_enabled;
            uint8_t ref_val;
            int16_t q_val;
            int8_t lf_val;
            int16_t qmul[2][2];
            uint8_t lflvl[4][2];
        } feat[MAX_SEGMENT];
    } segmentation;
    struct {
        unsigned log2_tile_cols, log2_tile_rows;
        unsigned tile_cols, tile_rows;
        unsigned tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    } tiling;
    unsigned sb_cols, sb_rows, rows, cols;

    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][3];
    } prob_ctx[4];
    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][11];
        uint8_t seg[7];
        uint8_t segpred[3];
    } prob;
    struct {
        unsigned y_mode[4][10];
        unsigned uv_mode[10][10];
        unsigned filter[4][3];
        unsigned mv_mode[7][4];
        unsigned intra[4][2];
        unsigned comp[5][2];
        unsigned single_ref[5][2][2];
        unsigned comp_ref[5][2];
        unsigned tx32p[2][4];
        unsigned tx16p[2][3];
        unsigned tx8p[2][2];
        unsigned skip[3][2];
        unsigned mv_joint[4];
        struct {
            unsigned sign[2];
            unsigned classes[11];
            unsigned class0[2];
            unsigned bits[10][2];
            unsigned class0_fp[2][4];
            unsigned fp[4];
            unsigned class0_hp[2];
            unsigned hp[2];
        } mv_comp[2];
        unsigned partition[4][4][4];
        unsigned coef[4][2][2][6][6][3];
        unsigned eob[4][2][2][6][6][2];
    } counts;
    enum TxfmMode txfmmode;
    enum CompPredMode comppredmode;

    // contextual (left/above) cache
    DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
    DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
    DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
    DECLARE_ALIGNED(8, uint8_t, left_uv_nnz_ctx)[2][8];
    DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_txfm_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_segpred_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_intra_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_comp_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_ref_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_filter_ctx)[8];
    uint8_t *above_partition_ctx;
    uint8_t *above_mode_ctx;
    // FIXME maybe merge some of the below in a flags field?
    uint8_t *above_y_nnz_ctx;
    uint8_t *above_uv_nnz_ctx[2];
    uint8_t *above_skip_ctx; // 1bit
    uint8_t *above_txfm_ctx; // 2bit
    uint8_t *above_segpred_ctx; // 1bit
    uint8_t *above_intra_ctx; // 1bit
    uint8_t *above_comp_ctx; // 1bit
    uint8_t *above_ref_ctx; // 2bit
    uint8_t *above_filter_ctx;
    VP56mv (*above_mv_ctx)[2];

    // whole-frame cache
    uint8_t *intra_pred_data[3];
    struct VP9Filter *lflvl;
    DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[71*80];

    // block reconstruction intermediates
    int block_alloc_using_2pass;
    int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
    uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
    struct { int x, y; } min_mv, max_mv;
    DECLARE_ALIGNED(32, uint8_t, tmp_y)[64*64];
    DECLARE_ALIGNED(32, uint8_t, tmp_uv)[2][32*32];
} VP9Context;
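
// Block dimensions per block size: bwh_tab[0] is in 4x4-pixel units
// (BS_64x64 -> 16x16), bwh_tab[1] in 8x8-pixel units, with sub-8x8
// sizes rounded up to 1.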
static const uint8_t bwh_tab[2][N_BS_SIZES][2] = {
    {
        { 16, 16 }, { 16, 8 }, { 8, 16 }, { 8, 8 }, { 8, 4 }, { 4, 8 },
        { 4, 4 }, { 4, 2 }, { 2, 4 }, { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 },
    }, {
        { 8, 8 }, { 8, 4 }, { 4, 8 }, { 4, 4 }, { 4, 2 }, { 2, 4 },
        { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
    }
};
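
// One frame's worth of per-8x8-block decoder state lives in a single
// refcounted "extradata" buffer: 64 segmentation-map bytes plus 64
// VP9mvrefPair entries per 64x64 superblock.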
static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)
{
    VP9Context *s = ctx->priv_data;
    int ret, sz;

    if ((ret = ff_thread_get_buffer(ctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    sz = 64 * s->sb_cols * s->sb_rows;
    if (!(f->extradata = av_buffer_allocz(sz * (1 + sizeof(struct VP9mvrefPair))))) {
        ff_thread_release_buffer(ctx, &f->tf);
        return AVERROR(ENOMEM);
    }
    f->segmentation_map = f->extradata->data;
    f->mv = (struct VP9mvrefPair *) (f->extradata->data + sz);

    return 0;
}

static void vp9_unref_frame(AVCodecContext *ctx, VP9Frame *f)
{
    ff_thread_release_buffer(ctx, &f->tf);
    av_buffer_unref(&f->extradata);
}

static int vp9_ref_frame(AVCodecContext *ctx, VP9Frame *dst, VP9Frame *src)
{
    int res;

    if ((res = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0) {
        return res;
    } else if (!(dst->extradata = av_buffer_ref(src->extradata))) {
        vp9_unref_frame(ctx, dst);
        return AVERROR(ENOMEM);
    }
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    return 0;
}

static int update_size(AVCodecContext *ctx, int w, int h)
{
    VP9Context *s = ctx->priv_data;
    uint8_t *p;

    av_assert0(w > 0 && h > 0);

    if (s->intra_pred_data[0] && w == ctx->width && h == ctx->height)
        return 0;

    ctx->width  = w;
    ctx->height = h;
    s->sb_cols  = (w + 63) >> 6;
    s->sb_rows  = (h + 63) >> 6;
    s->cols     = (w + 7) >> 3;
    s->rows     = (h + 7) >> 3;

#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    p = av_malloc(s->sb_cols * (240 + sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0],  uint8_t *,         64);
    assign(s->intra_pred_data[1],  uint8_t *,         32);
    assign(s->intra_pred_data[2],  uint8_t *,         32);
    assign(s->above_y_nnz_ctx,     uint8_t *,         16);
    assign(s->above_mode_ctx,      uint8_t *,         16);
    assign(s->above_mv_ctx,        VP56mv(*)[2],      16);
    assign(s->above_partition_ctx, uint8_t *,          8);
    assign(s->above_skip_ctx,      uint8_t *,          8);
    assign(s->above_txfm_ctx,      uint8_t *,          8);
    assign(s->above_uv_nnz_ctx[0], uint8_t *,          8);
    assign(s->above_uv_nnz_ctx[1], uint8_t *,          8);
    assign(s->above_segpred_ctx,   uint8_t *,          8);
    assign(s->above_intra_ctx,     uint8_t *,          8);
    assign(s->above_comp_ctx,      uint8_t *,          8);
    assign(s->above_ref_ctx,       uint8_t *,          8);
    assign(s->above_filter_ctx,    uint8_t *,          8);
    assign(s->lflvl,               struct VP9Filter *, 1);
#undef assign

    // these will be re-allocated a little later
    av_freep(&s->b_base);
    av_freep(&s->block_base);

    return 0;
}
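
// Coefficient scratch is sized per superblock: 64*64 int16_t luma
// coefficients, two 32*32 int16_t chroma planes, and 256 + 2*64 eob
// bytes, which is exactly the (64 * 64 + 128) * 3 bytes allocated below
// (times the superblock count in the 2-pass case).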
static int update_block_buffers(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;

    if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->frames[CUR_FRAME].uses_2pass)
        return 0;

    av_free(s->b_base);
    av_free(s->block_base);
    if (s->frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        s->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        s->block_base = av_mallocz((64 * 64 + 128) * sbs * 3);
        if (!s->b_base || !s->block_base)
            return AVERROR(ENOMEM);
        s->uvblock_base[0] = s->block_base + sbs * 64 * 64;
        s->uvblock_base[1] = s->uvblock_base[0] + sbs * 32 * 32;
        s->eob_base = (uint8_t *) (s->uvblock_base[1] + sbs * 32 * 32);
        s->uveob_base[0] = s->eob_base + 256 * sbs;
        s->uveob_base[1] = s->uveob_base[0] + 64 * sbs;
    } else {
        s->b_base = av_malloc(sizeof(VP9Block));
        s->block_base = av_mallocz((64 * 64 + 128) * 3);
        if (!s->b_base || !s->block_base)
            return AVERROR(ENOMEM);
        s->uvblock_base[0] = s->block_base + 64 * 64;
        s->uvblock_base[1] = s->uvblock_base[0] + 32 * 32;
        s->eob_base = (uint8_t *) (s->uvblock_base[1] + 32 * 32);
        s->uveob_base[0] = s->eob_base + 256;
        s->uveob_base[1] = s->uveob_base[0] + 64;
    }
    s->block_alloc_using_2pass = s->frames[CUR_FRAME].uses_2pass;

    return 0;
}

// for some reason the sign bit is at the end, not the start, of a bit sequence
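// (e.g. for n = 4, magnitude bits 0101 followed by sign bit 1 decode to -5)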
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
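
// Maps a distance v back to a value around center m while staying
// non-negative: v == 0 gives m, odd v gives m - (v + 1) / 2, even v
// gives m + v / 2, and once v > 2 * m (the lower side is exhausted)
// v itself is returned.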
static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    return v > 2 * m ? v : v & 1 ? m - ((v + 1) >> 1) : m + (v >> 1);
}

// differential forward probability updates
static int update_prob(VP56RangeCoder *c, int p)
{
    static const int inv_map_table[254] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */
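    /* The resulting ranges: d in [0, 16) costs roughly 1+4 coded bits,
     * [16, 32) costs 2+4, [32, 64) costs 3+5, and everything above uses
     * the 7-bit escape below, with one extra bit once d >= 65. */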
    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);
        d += 64;
    }

    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}

static int decode_frame_header(AVCodecContext *ctx,
                               const uint8_t *data, int size, int *ref)
{
    VP9Context *s = ctx->priv_data;
    int c, i, j, k, l, m, n, w, h, max, size2, res, sharp;
    int last_invisible;
    const uint8_t *data2;

    /* general header */
    if ((res = init_get_bits8(&s->gb, data, size)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
        return res;
    }
    if (get_bits(&s->gb, 2) != 0x2) { // frame marker
        av_log(ctx, AV_LOG_ERROR, "Invalid frame marker\n");
        return AVERROR_INVALIDDATA;
    }
    s->profile = get_bits1(&s->gb);
    if (get_bits1(&s->gb)) { // reserved bit
        av_log(ctx, AV_LOG_ERROR, "Reserved bit should be zero\n");
        return AVERROR_INVALIDDATA;
    }
    if (get_bits1(&s->gb)) {
        *ref = get_bits(&s->gb, 3);
        return 0;
    }
    s->last_keyframe = s->keyframe;
    s->keyframe      = !get_bits1(&s->gb);
    last_invisible   = s->invisible;
    s->invisible     = !get_bits1(&s->gb);
    s->errorres      = get_bits1(&s->gb);
    s->use_last_frame_mvs = !s->errorres && !last_invisible;
    if (s->keyframe) {
        if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
            av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
            return AVERROR_INVALIDDATA;
        }
        s->colorspace = get_bits(&s->gb, 3);
        if (s->colorspace == 7) { // RGB = profile 1
            av_log(ctx, AV_LOG_ERROR, "RGB not supported in profile 0\n");
            return AVERROR_INVALIDDATA;
        }
        s->fullrange = get_bits1(&s->gb);
        // for profile 1, here follows the subsampling bits
        s->refreshrefmask = 0xff;
        w = get_bits(&s->gb, 16) + 1;
        h = get_bits(&s->gb, 16) + 1;
        if (get_bits1(&s->gb)) // display size
            skip_bits(&s->gb, 32);
    } else {
        s->intraonly = s->invisible ? get_bits1(&s->gb) : 0;
        s->resetctx  = s->errorres ? 0 : get_bits(&s->gb, 2);
        if (s->intraonly) {
            if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
                av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
                return AVERROR_INVALIDDATA;
            }
            s->refreshrefmask = get_bits(&s->gb, 8);
            w = get_bits(&s->gb, 16) + 1;
            h = get_bits(&s->gb, 16) + 1;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
        } else {
            s->refreshrefmask = get_bits(&s->gb, 8);
            s->refidx[0]      = get_bits(&s->gb, 3);
            s->signbias[0]    = get_bits1(&s->gb);
            s->refidx[1]      = get_bits(&s->gb, 3);
            s->signbias[1]    = get_bits1(&s->gb);
            s->refidx[2]      = get_bits(&s->gb, 3);
            s->signbias[2]    = get_bits1(&s->gb);
            if (!s->refs[s->refidx[0]].f->data[0] ||
                !s->refs[s->refidx[1]].f->data[0] ||
                !s->refs[s->refidx[2]].f->data[0]) {
                av_log(ctx, AV_LOG_ERROR, "Not all references are available\n");
                return AVERROR_INVALIDDATA;
            }
            if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[0]].f->width;
                h = s->refs[s->refidx[0]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[1]].f->width;
                h = s->refs[s->refidx[1]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[2]].f->width;
                h = s->refs[s->refidx[2]].f->height;
            } else {
                w = get_bits(&s->gb, 16) + 1;
                h = get_bits(&s->gb, 16) + 1;
            }
            // Note that in this code, "CUR_FRAME" is actually before we
            // have formally allocated a frame, and thus actually represents
            // the _last_ frame
            s->use_last_frame_mvs &= s->frames[CUR_FRAME].tf.f->width == w &&
                                     s->frames[CUR_FRAME].tf.f->height == h;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
            s->highprecisionmvs = get_bits1(&s->gb);
            s->filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
                                                get_bits(&s->gb, 2);
            s->allowcompinter = s->signbias[0] != s->signbias[1] ||
                                s->signbias[0] != s->signbias[2];
            if (s->allowcompinter) {
                if (s->signbias[0] == s->signbias[1]) {
                    s->fixcompref    = 2;
                    s->varcompref[0] = 0;
                    s->varcompref[1] = 1;
                } else if (s->signbias[0] == s->signbias[2]) {
                    s->fixcompref    = 1;
                    s->varcompref[0] = 0;
                    s->varcompref[1] = 2;
                } else {
                    s->fixcompref    = 0;
                    s->varcompref[0] = 1;
                    s->varcompref[1] = 2;
                }
            }
        }
    }
    s->refreshctx   = s->errorres ? 0 : get_bits1(&s->gb);
    s->parallelmode = s->errorres ? 1 : get_bits1(&s->gb);
    s->framectxid   = c = get_bits(&s->gb, 2);

    /* loopfilter header data */
    s->filter.level = get_bits(&s->gb, 6);
    sharp = get_bits(&s->gb, 3);
    // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
    // the old cache values since they are still valid
    if (s->filter.sharpness != sharp)
        memset(s->filter.lim_lut, 0, sizeof(s->filter.lim_lut));
    s->filter.sharpness = sharp;
    if ((s->lf_delta.enabled = get_bits1(&s->gb))) {
        if (get_bits1(&s->gb)) {
            for (i = 0; i < 4; i++)
                if (get_bits1(&s->gb))
                    s->lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
            for (i = 0; i < 2; i++)
                if (get_bits1(&s->gb))
                    s->lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
        }
    } else {
        memset(&s->lf_delta, 0, sizeof(s->lf_delta));
    }

    /* quantization header data */
    s->yac_qi      = get_bits(&s->gb, 8);
    s->ydc_qdelta  = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->lossless    = s->yac_qi == 0 && s->ydc_qdelta == 0 &&
                     s->uvdc_qdelta == 0 && s->uvac_qdelta == 0;

    /* segmentation header info */
    if ((s->segmentation.enabled = get_bits1(&s->gb))) {
        if ((s->segmentation.update_map = get_bits1(&s->gb))) {
            for (i = 0; i < 7; i++)
                s->prob.seg[i] = get_bits1(&s->gb) ?
                                 get_bits(&s->gb, 8) : 255;
            if ((s->segmentation.temporal = get_bits1(&s->gb))) {
                for (i = 0; i < 3; i++)
                    s->prob.segpred[i] = get_bits1(&s->gb) ?
                                         get_bits(&s->gb, 8) : 255;
            }
        }
        if ((!s->segmentation.update_map || s->segmentation.temporal) &&
            (w != s->frames[CUR_FRAME].tf.f->width ||
             h != s->frames[CUR_FRAME].tf.f->height)) {
            av_log(ctx, AV_LOG_ERROR,
                   "Reference segmap (temp=%d,update=%d) enabled on size-change!\n",
                   s->segmentation.temporal, s->segmentation.update_map);
            return AVERROR_INVALIDDATA;
        }

        if (get_bits1(&s->gb)) {
            s->segmentation.absolute_vals = get_bits1(&s->gb);
            for (i = 0; i < 8; i++) {
                if ((s->segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
                if ((s->segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
                if ((s->segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
                s->segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
            }
        }
    } else {
        s->segmentation.feat[0].q_enabled    = 0;
        s->segmentation.feat[0].lf_enabled   = 0;
        s->segmentation.feat[0].skip_enabled = 0;
        s->segmentation.feat[0].ref_enabled  = 0;
    }

    // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
    for (i = 0; i < (s->segmentation.enabled ? 8 : 1); i++) {
        int qyac, qydc, quvac, quvdc, lflvl, sh;

        if (s->segmentation.feat[i].q_enabled) {
            if (s->segmentation.absolute_vals)
                qyac = s->segmentation.feat[i].q_val;
            else
                qyac = s->yac_qi + s->segmentation.feat[i].q_val;
        } else {
            qyac = s->yac_qi;
        }
        qydc  = av_clip_uintp2(qyac + s->ydc_qdelta, 8);
        quvdc = av_clip_uintp2(qyac + s->uvdc_qdelta, 8);
        quvac = av_clip_uintp2(qyac + s->uvac_qdelta, 8);
        qyac  = av_clip_uintp2(qyac, 8);

        s->segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[qydc];
        s->segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[qyac];
        s->segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[quvdc];
        s->segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[quvac];

        sh = s->filter.level >= 32;
        if (s->segmentation.feat[i].lf_enabled) {
            if (s->segmentation.absolute_vals)
                lflvl = s->segmentation.feat[i].lf_val;
            else
                lflvl = s->filter.level + s->segmentation.feat[i].lf_val;
        } else {
            lflvl = s->filter.level;
        }
        s->segmentation.feat[i].lflvl[0][0] =
        s->segmentation.feat[i].lflvl[0][1] =
            av_clip_uintp2(lflvl + (s->lf_delta.ref[0] << sh), 6);
        for (j = 1; j < 4; j++) {
            s->segmentation.feat[i].lflvl[j][0] =
                av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
                                         s->lf_delta.mode[0]) * (1 << sh)), 6);
            s->segmentation.feat[i].lflvl[j][1] =
                av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
                                         s->lf_delta.mode[1]) * (1 << sh)), 6);
        }
    }

    /* tiling info */
    if ((res = update_size(ctx, w, h)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d\n", w, h);
        return res;
    }
    for (s->tiling.log2_tile_cols = 0;
         (s->sb_cols >> s->tiling.log2_tile_cols) > 64;
         s->tiling.log2_tile_cols++) ;
    for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
    max = FFMAX(0, max - 1);
    while (max > s->tiling.log2_tile_cols) {
        if (get_bits1(&s->gb))
            s->tiling.log2_tile_cols++;
        else
            break;
    }
    s->tiling.log2_tile_rows = decode012(&s->gb);
    s->tiling.tile_rows = 1 << s->tiling.log2_tile_rows;
    if (s->tiling.tile_cols != (1 << s->tiling.log2_tile_cols)) {
        s->tiling.tile_cols = 1 << s->tiling.log2_tile_cols;
        s->c_b = av_fast_realloc(s->c_b, &s->c_b_size,
                                 sizeof(VP56RangeCoder) * s->tiling.tile_cols);
        if (!s->c_b) {
            av_log(ctx, AV_LOG_ERROR, "Ran out of memory during range coder init\n");
            return AVERROR(ENOMEM);
        }
    }

    if (s->keyframe || s->errorres || s->intraonly) {
        s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
                           s->prob_ctx[3].p = vp9_default_probs;
        memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[1].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[2].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
    }

    // next 16 bits is size of the rest of the header (arith-coded)
    size2 = get_bits(&s->gb, 16);
    data2 = align_get_bits(&s->gb);
    if (size2 > size - (data2 - data)) {
        av_log(ctx, AV_LOG_ERROR, "Invalid compressed header size\n");
        return AVERROR_INVALIDDATA;
    }
    ff_vp56_init_range_decoder(&s->c, data2, size2);
    if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
        av_log(ctx, AV_LOG_ERROR, "Marker bit was set\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->keyframe || s->intraonly) {
        memset(s->counts.coef, 0, sizeof(s->counts.coef) + sizeof(s->counts.eob));
    } else {
        memset(&s->counts, 0, sizeof(s->counts));
    }
    // FIXME is it faster to not copy here, but do it down in the fw updates
    // as explicit copies if the fw update is missing (and skip the copy upon
    // fw update)?
    s->prob.p = s->prob_ctx[c].p;

    // txfm updates
    if (s->lossless) {
        s->txfmmode = TX_4X4;
    } else {
        s->txfmmode = vp8_rac_get_uint(&s->c, 2);
        if (s->txfmmode == 3)
            s->txfmmode += vp8_rac_get(&s->c);

        if (s->txfmmode == TX_SWITCHABLE) {
            for (i = 0; i < 2; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx16p[i][j] =
                            update_prob(&s->c, s->prob.p.tx16p[i][j]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 3; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx32p[i][j] =
                            update_prob(&s->c, s->prob.p.tx32p[i][j]);
        }
    }

    // coef updates
    for (i = 0; i < 4; i++) {
        uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
        if (vp8_rac_get(&s->c)) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m >= 3 && l == 0) // dc only has 3 pt
                                break;
                            for (n = 0; n < 3; n++) {
                                if (vp56_rac_get_prob_branchy(&s->c, 252)) {
                                    p[n] = update_prob(&s->c, r[n]);
                                } else {
                                    p[n] = r[n];
                                }
                            }
                            p[3] = 0;
                        }
        } else {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m > 3 && l == 0) // dc only has 3 pt
                                break;
                            memcpy(p, r, 3);
                            p[3] = 0;
                        }
        }
        if (s->txfmmode == i)
            break;
    }

    // mode updates
    for (i = 0; i < 3; i++)
        if (vp56_rac_get_prob_branchy(&s->c, 252))
            s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
    if (!s->keyframe && !s->intraonly) {
        for (i = 0; i < 7; i++)
            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_mode[i][j] =
                        update_prob(&s->c, s->prob.p.mv_mode[i][j]);

        if (s->filtermode == FILTER_SWITCHABLE)
            for (i = 0; i < 4; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.filter[i][j] =
                            update_prob(&s->c, s->prob.p.filter[i][j]);

        for (i = 0; i < 4; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);

        if (s->allowcompinter) {
            s->comppredmode = vp8_rac_get(&s->c);
            if (s->comppredmode)
                s->comppredmode += vp8_rac_get(&s->c);
            if (s->comppredmode == PRED_SWITCHABLE)
                for (i = 0; i < 5; i++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.comp[i] =
                            update_prob(&s->c, s->prob.p.comp[i]);
        } else {
            s->comppredmode = PRED_SINGLEREF;
        }

        if (s->comppredmode != PRED_COMPREF) {
            for (i = 0; i < 5; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][0] =
                        update_prob(&s->c, s->prob.p.single_ref[i][0]);
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][1] =
                        update_prob(&s->c, s->prob.p.single_ref[i][1]);
            }
        }

        if (s->comppredmode != PRED_SINGLEREF) {
            for (i = 0; i < 5; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.comp_ref[i] =
                        update_prob(&s->c, s->prob.p.comp_ref[i]);
        }

        for (i = 0; i < 4; i++)
            for (j = 0; j < 9; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.y_mode[i][j] =
                        update_prob(&s->c, s->prob.p.y_mode[i][j]);

        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.partition[3 - i][j][k] =
                            update_prob(&s->c, s->prob.p.partition[3 - i][j][k]);

        // mv fields don't use the update_prob subexp model for some reason
        for (i = 0; i < 3; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

        for (i = 0; i < 2; i++) {
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].sign = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].classes[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].class0 = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].bits[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.mv_comp[i].class0_fp[j][k] =
                            (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].fp[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        if (s->highprecisionmvs) {
            for (i = 0; i < 2; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].class0_hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
            }
        }
    }

    return (data2 - data) + size2;
}

static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
                                      VP9Context *s)
{
    dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
    dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
}
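
// Per-block-size scan order for MV prediction: up to 8 candidate
// positions, each a { col, row } offset (in 8x8-block units) relative
// to the current block's position, are probed in order.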
static void find_ref_mvs(VP9Context *s,
                         VP56mv *pmv, int ref, int z, int idx, int sb)
{
    static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
        [BS_64x64] = {{  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
                      { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 }},
        [BS_64x32] = {{  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 }},
        [BS_32x64] = {{ -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
                      { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 }},
        [BS_32x32] = {{  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_32x16] = {{  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
                      { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x32] = {{ -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
                      {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 }},
        [BS_16x16] = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x8]  = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
                      {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 }},
        [BS_8x16]  = {{ -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
                      { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 }},
        [BS_8x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_8x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
    };
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
#define INVALID_MV 0x80008000U
    uint32_t mem = INVALID_MV;
    int i;

#define RETURN_DIRECT_MV(mv) \
    do { \
        uint32_t m = AV_RN32A(&mv); \
        if (!idx) { \
            AV_WN32A(pmv, m); \
            return; \
        } else if (mem == INVALID_MV) { \
            mem = m; \
        } else if (m != mem) { \
            AV_WN32A(pmv, m); \
            return; \
        } \
    } while (0)

    if (sb >= 0) {
        if (sb == 2 || sb == 1) {
            RETURN_DIRECT_MV(b->mv[0][z]);
        } else if (sb == 3) {
            RETURN_DIRECT_MV(b->mv[2][z]);
            RETURN_DIRECT_MV(b->mv[1][z]);
            RETURN_DIRECT_MV(b->mv[0][z]);
        }

#define RETURN_MV(mv) \
    do { \
        if (sb > 0) { \
            VP56mv tmp; \
            uint32_t m; \
            clamp_mv(&tmp, &mv, s); \
            m = AV_RN32A(&tmp); \
            if (!idx) { \
                AV_WN32A(pmv, m); \
                return; \
            } else if (mem == INVALID_MV) { \
                mem = m; \
            } else if (m != mem) { \
                AV_WN32A(pmv, m); \
                return; \
            } \
        } else { \
            uint32_t m = AV_RN32A(&mv); \
            if (!idx) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } else if (mem == INVALID_MV) { \
                mem = m; \
            } else if (m != mem) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } \
        } \
    } while (0)

        if (row > 0) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
            }
        }
        if (col > s->tiling.tile_col_start) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
            }
        }
        i = 2;
    } else {
        i = 0;
    }

    // previously coded MVs in this neighbourhood, using same reference frame
    for (; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] == ref) {
                RETURN_MV(mv->mv[0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(mv->mv[1]);
            }
        }
    }

    // MV at this position in previous frame, using same reference frame
    if (s->use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        if (!s->frames[REF_FRAME_MVPAIR].uses_2pass)
            ff_thread_await_progress(&s->frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
        if (mv->ref[0] == ref) {
            RETURN_MV(mv->mv[0]);
        } else if (mv->ref[1] == ref) {
            RETURN_MV(mv->mv[1]);
        }
    }

#define RETURN_SCALE_MV(mv, scale) \
    do { \
        if (scale) { \
            VP56mv mv_temp = { -mv.x, -mv.y }; \
            RETURN_MV(mv_temp); \
        } else { \
            RETURN_MV(mv); \
        } \
    } while (0)

    // previously coded MVs in this neighbourhood, using different reference frame
    for (i = 0; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] != ref && mv->ref[0] >= 0) {
                RETURN_SCALE_MV(mv->mv[0], s->signbias[mv->ref[0]] != s->signbias[ref]);
            }
            if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                // BUG - libvpx has this condition regardless of whether
                // we used the first ref MV and pre-scaling
                AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
                RETURN_SCALE_MV(mv->mv[1], s->signbias[mv->ref[1]] != s->signbias[ref]);
            }
        }
    }

    // MV at this position in previous frame, using different reference frame
    if (s->use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        // no need to await_progress, because we already did that above
        if (mv->ref[0] != ref && mv->ref[0] >= 0) {
            RETURN_SCALE_MV(mv->mv[0], s->signbias[mv->ref[0]] != s->signbias[ref]);
        }
        if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
            // BUG - libvpx has this condition regardless of whether
            // we used the first ref MV and pre-scaling
            AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
            RETURN_SCALE_MV(mv->mv[1], s->signbias[mv->ref[1]] != s->signbias[ref]);
        }
    }

    AV_ZERO32(pmv);
#undef INVALID_MV
#undef RETURN_MV
#undef RETURN_SCALE_MV
}
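
// Decodes one motion-vector component delta (in 1/8-pel units): a sign,
// a magnitude class, then class-dependent integer bits plus 1/4-pel
// ("fp") and optional 1/8-pel ("hp") fraction bits; returns +/-(n + 1).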
static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
{
    int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
    int n, c = vp8_rac_get_tree(&s->c, vp9_mv_class_tree,
                                s->prob.p.mv_comp[idx].classes);

    s->counts.mv_comp[idx].sign[sign]++;
    s->counts.mv_comp[idx].classes[c]++;
    if (c) {
        int m;

        for (n = 0, m = 0; m < c; m++) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
            n |= bit << m;
            s->counts.mv_comp[idx].bits[m][bit]++;
        }
        n <<= 3;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree, s->prob.p.mv_comp[idx].fp);
        n |= bit << 1;
        s->counts.mv_comp[idx].fp[bit]++;
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
            s->counts.mv_comp[idx].hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].hp[1]++;
        }
        n += 8 << c;
    } else {
        n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
        s->counts.mv_comp[idx].class0[n]++;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree,
                               s->prob.p.mv_comp[idx].class0_fp[n]);
        s->counts.mv_comp[idx].class0_fp[n][bit]++;
        n = (n << 3) | (bit << 1);
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
            s->counts.mv_comp[idx].class0_hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].class0_hp[1]++;
        }
    }

    return sign ? -(n + 1) : (n + 1);
}

static void fill_mv(VP9Context *s,
                    VP56mv *mv, int mode, int sb)
{
    VP9Block *b = s->b;

    if (mode == ZEROMV) {
        AV_ZERO64(mv);
    } else {
        int hp;

        // FIXME cache this value and reuse for other subblocks
        find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
                     mode == NEWMV ? -1 : sb);
        // FIXME maybe move this code into find_ref_mvs()
        if ((mode == NEWMV || sb == -1) &&
            !(hp = s->highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
            if (mv[0].y & 1) {
                if (mv[0].y < 0)
                    mv[0].y++;
                else
                    mv[0].y--;
            }
            if (mv[0].x & 1) {
                if (mv[0].x < 0)
                    mv[0].x++;
                else
                    mv[0].x--;
            }
        }
        if (mode == NEWMV) {
            enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                              s->prob.p.mv_joint);

            s->counts.mv_joint[j]++;
            if (j >= MV_JOINT_V)
                mv[0].y += read_mv_component(s, 0, hp);
            if (j & 1)
                mv[0].x += read_mv_component(s, 1, hp);
        }

        if (b->comp) {
            // FIXME cache this value and reuse for other subblocks
            find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
                         mode == NEWMV ? -1 : sb);
            if ((mode == NEWMV || sb == -1) &&
                !(hp = s->highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
                if (mv[1].y & 1) {
                    if (mv[1].y < 0)
                        mv[1].y++;
                    else
                        mv[1].y--;
                }
                if (mv[1].x & 1) {
                    if (mv[1].x < 0)
                        mv[1].x++;
                    else
                        mv[1].x--;
                }
            }
            if (mode == NEWMV) {
                enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                                  s->prob.p.mv_joint);

                s->counts.mv_joint[j]++;
                if (j >= MV_JOINT_V)
                    mv[1].y += read_mv_component(s, 0, hp);
                if (j & 1)
                    mv[1].x += read_mv_component(s, 1, hp);
            }
        }
    }
}
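
// Fills a w x h rectangle of a context plane with the byte value v,
// widening to 16/32/64-bit stores where the width allows; used e.g.
// in decode_mode() below to spread b->seg_id over the segmentation map.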
static av_always_inline void setctx_2d(uint8_t *ptr, int w, int h,
                                       ptrdiff_t stride, int v)
{
    switch (w) {
    case 1:
        do {
            *ptr = v;
            ptr += stride;
        } while (--h);
        break;
    case 2: {
        int v16 = v * 0x0101;
        do {
            AV_WN16A(ptr, v16);
            ptr += stride;
        } while (--h);
        break;
    }
    case 4: {
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr, v32);
            ptr += stride;
        } while (--h);
        break;
    }
    case 8: {
#if HAVE_FAST_64BIT
        uint64_t v64 = v * 0x0101010101010101ULL;
        do {
            AV_WN64A(ptr, v64);
            ptr += stride;
        } while (--h);
#else
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr, v32);
            AV_WN32A(ptr + 4, v32);
            ptr += stride;
        } while (--h);
#endif
        break;
    }
    }
}
  1206. static void decode_mode(AVCodecContext *ctx)
  1207. {
  1208. static const uint8_t left_ctx[N_BS_SIZES] = {
  1209. 0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
  1210. };
  1211. static const uint8_t above_ctx[N_BS_SIZES] = {
  1212. 0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
  1213. };
  1214. static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = {
  1215. TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
  1216. TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
  1217. };
  1218. VP9Context *s = ctx->priv_data;
  1219. VP9Block *b = s->b;
  1220. int row = s->row, col = s->col, row7 = s->row7;
  1221. enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
  1222. int w4 = FFMIN(s->cols - col, bwh_tab[1][b->bs][0]);
  1223. int h4 = FFMIN(s->rows - row, bwh_tab[1][b->bs][1]), y;
  1224. int have_a = row > 0, have_l = col > s->tiling.tile_col_start;
  1225. int vref, filter_id;
  1226. if (!s->segmentation.enabled) {
  1227. b->seg_id = 0;
  1228. } else if (s->keyframe || s->intraonly) {
  1229. b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg);
  1230. } else if (!s->segmentation.update_map ||
  1231. (s->segmentation.temporal &&
  1232. vp56_rac_get_prob_branchy(&s->c,
  1233. s->prob.segpred[s->above_segpred_ctx[col] +
  1234. s->left_segpred_ctx[row7]]))) {
  1235. if (!s->errorres) {
  1236. int pred = 8, x;
  1237. uint8_t *refsegmap = s->frames[REF_FRAME_SEGMAP].segmentation_map;
  1238. if (!s->frames[REF_FRAME_SEGMAP].uses_2pass)
  1239. ff_thread_await_progress(&s->frames[REF_FRAME_SEGMAP].tf, row >> 3, 0);
  1240. for (y = 0; y < h4; y++) {
  1241. int idx_base = (y + row) * 8 * s->sb_cols + col;
  1242. for (x = 0; x < w4; x++)
  1243. pred = FFMIN(pred, refsegmap[idx_base + x]);
  1244. }
  1245. av_assert1(pred < 8);
  1246. b->seg_id = pred;
  1247. } else {
  1248. b->seg_id = 0;
  1249. }
  1250. memset(&s->above_segpred_ctx[col], 1, w4);
  1251. memset(&s->left_segpred_ctx[row7], 1, h4);
  1252. } else {
  1253. b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree,
  1254. s->prob.seg);
  1255. memset(&s->above_segpred_ctx[col], 0, w4);
  1256. memset(&s->left_segpred_ctx[row7], 0, h4);
  1257. }
  1258. if (s->segmentation.enabled &&
  1259. (s->segmentation.update_map || s->keyframe || s->intraonly)) {
  1260. setctx_2d(&s->frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col],
  1261. w4, h4, 8 * s->sb_cols, b->seg_id);
  1262. }
  1263. b->skip = s->segmentation.enabled &&
  1264. s->segmentation.feat[b->seg_id].skip_enabled;
  1265. if (!b->skip) {
  1266. int c = s->left_skip_ctx[row7] + s->above_skip_ctx[col];
  1267. b->skip = vp56_rac_get_prob(&s->c, s->prob.p.skip[c]);
  1268. s->counts.skip[c][b->skip]++;
  1269. }
  1270. if (s->keyframe || s->intraonly) {
  1271. b->intra = 1;
  1272. } else if (s->segmentation.feat[b->seg_id].ref_enabled) {
  1273. b->intra = !s->segmentation.feat[b->seg_id].ref_val;
  1274. } else {
  1275. int c, bit;
  1276. if (have_a && have_l) {
  1277. c = s->above_intra_ctx[col] + s->left_intra_ctx[row7];
  1278. c += (c == 2);
  1279. } else {
  1280. c = have_a ? 2 * s->above_intra_ctx[col] :
  1281. have_l ? 2 * s->left_intra_ctx[row7] : 0;
  1282. }
  1283. bit = vp56_rac_get_prob(&s->c, s->prob.p.intra[c]);
  1284. s->counts.intra[c][bit]++;
  1285. b->intra = !bit;
  1286. }
  1287. if ((b->intra || !b->skip) && s->txfmmode == TX_SWITCHABLE) {
  1288. int c;
  1289. if (have_a) {
  1290. if (have_l) {
  1291. c = (s->above_skip_ctx[col] ? max_tx :
  1292. s->above_txfm_ctx[col]) +
  1293. (s->left_skip_ctx[row7] ? max_tx :
  1294. s->left_txfm_ctx[row7]) > max_tx;
  1295. } else {
  1296. c = s->above_skip_ctx[col] ? 1 :
  1297. (s->above_txfm_ctx[col] * 2 > max_tx);
  1298. }
  1299. } else if (have_l) {
  1300. c = s->left_skip_ctx[row7] ? 1 :
  1301. (s->left_txfm_ctx[row7] * 2 > max_tx);
  1302. } else {
  1303. c = 1;
  1304. }
  1305. switch (max_tx) {
  1306. case TX_32X32:
  1307. b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][0]);
  1308. if (b->tx) {
  1309. b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][1]);
  1310. if (b->tx == 2)
  1311. b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][2]);
  1312. }
  1313. s->counts.tx32p[c][b->tx]++;
  1314. break;
  1315. case TX_16X16:
  1316. b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][0]);
  1317. if (b->tx)
  1318. b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][1]);
  1319. s->counts.tx16p[c][b->tx]++;
  1320. break;
  1321. case TX_8X8:
  1322. b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx8p[c]);
  1323. s->counts.tx8p[c][b->tx]++;
  1324. break;
  1325. case TX_4X4:
  1326. b->tx = TX_4X4;
  1327. break;
  1328. }
  1329. } else {
  1330. b->tx = FFMIN(max_tx, s->txfmmode);
  1331. }
  1332. if (s->keyframe || s->intraonly) {
  1333. uint8_t *a = &s->above_mode_ctx[col * 2];
  1334. uint8_t *l = &s->left_mode_ctx[(row7) << 1];
  1335. b->comp = 0;
  1336. if (b->bs > BS_8x8) {
  1337. // FIXME the memory storage intermediates here aren't really
  1338. // necessary, they're just there to make the code slightly
  1339. // simpler for now
  1340. b->mode[0] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1341. vp9_default_kf_ymode_probs[a[0]][l[0]]);
  1342. if (b->bs != BS_8x4) {
  1343. b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1344. vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
  1345. l[0] = a[1] = b->mode[1];
  1346. } else {
  1347. l[0] = a[1] = b->mode[1] = b->mode[0];
  1348. }
  1349. if (b->bs != BS_4x8) {
  1350. b->mode[2] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1351. vp9_default_kf_ymode_probs[a[0]][l[1]]);
  1352. if (b->bs != BS_8x4) {
  1353. b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1354. vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
  1355. l[1] = a[1] = b->mode[3];
  1356. } else {
  1357. l[1] = a[1] = b->mode[3] = b->mode[2];
  1358. }
  1359. } else {
  1360. b->mode[2] = b->mode[0];
  1361. l[1] = a[1] = b->mode[3] = b->mode[1];
  1362. }
  1363. } else {
  1364. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1365. vp9_default_kf_ymode_probs[*a][*l]);
  1366. b->mode[3] = b->mode[2] = b->mode[1] = b->mode[0];
  1367. // FIXME this can probably be optimized
  1368. memset(a, b->mode[0], bwh_tab[0][b->bs][0]);
  1369. memset(l, b->mode[0], bwh_tab[0][b->bs][1]);
  1370. }
  1371. b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1372. vp9_default_kf_uvmode_probs[b->mode[3]]);
  1373. } else if (b->intra) {
  1374. b->comp = 0;
  1375. if (b->bs > BS_8x8) {
  1376. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1377. s->prob.p.y_mode[0]);
  1378. s->counts.y_mode[0][b->mode[0]]++;
  1379. if (b->bs != BS_8x4) {
  1380. b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1381. s->prob.p.y_mode[0]);
  1382. s->counts.y_mode[0][b->mode[1]]++;
  1383. } else {
  1384. b->mode[1] = b->mode[0];
  1385. }
  1386. if (b->bs != BS_4x8) {
  1387. b->mode[2] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1388. s->prob.p.y_mode[0]);
  1389. s->counts.y_mode[0][b->mode[2]]++;
  1390. if (b->bs != BS_8x4) {
  1391. b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1392. s->prob.p.y_mode[0]);
  1393. s->counts.y_mode[0][b->mode[3]]++;
  1394. } else {
  1395. b->mode[3] = b->mode[2];
  1396. }
  1397. } else {
  1398. b->mode[2] = b->mode[0];
  1399. b->mode[3] = b->mode[1];
  1400. }
  1401. } else {
  1402. static const uint8_t size_group[10] = {
  1403. 3, 3, 3, 3, 2, 2, 2, 1, 1, 1
  1404. };
  1405. int sz = size_group[b->bs];
  1406. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1407. s->prob.p.y_mode[sz]);
  1408. b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
  1409. s->counts.y_mode[sz][b->mode[3]]++;
  1410. }
  1411. b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
  1412. s->prob.p.uv_mode[b->mode[3]]);
  1413. s->counts.uv_mode[b->mode[3]][b->uvmode]++;
  1414. } else {
  1415. static const uint8_t inter_mode_ctx_lut[14][14] = {
  1416. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1417. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1418. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1419. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1420. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1421. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1422. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1423. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1424. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1425. { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
  1426. { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
  1427. { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
  1428. { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
  1429. { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
  1430. };
  1431. if (s->segmentation.feat[b->seg_id].ref_enabled) {
  1432. av_assert2(s->segmentation.feat[b->seg_id].ref_val != 0);
  1433. b->comp = 0;
  1434. b->ref[0] = s->segmentation.feat[b->seg_id].ref_val - 1;
  1435. } else {
  1436. // read comp_pred flag
  1437. if (s->comppredmode != PRED_SWITCHABLE) {
  1438. b->comp = s->comppredmode == PRED_COMPREF;
  1439. } else {
  1440. int c;
  1441. // FIXME add intra as ref=0xff (or -1) to make these easier?
  1442. if (have_a) {
  1443. if (have_l) {
  1444. if (s->above_comp_ctx[col] && s->left_comp_ctx[row7]) {
  1445. c = 4;
  1446. } else if (s->above_comp_ctx[col]) {
  1447. c = 2 + (s->left_intra_ctx[row7] ||
  1448. s->left_ref_ctx[row7] == s->fixcompref);
  1449. } else if (s->left_comp_ctx[row7]) {
  1450. c = 2 + (s->above_intra_ctx[col] ||
  1451. s->above_ref_ctx[col] == s->fixcompref);
  1452. } else {
  1453. c = (!s->above_intra_ctx[col] &&
  1454. s->above_ref_ctx[col] == s->fixcompref) ^
  1455. (!s->left_intra_ctx[row7] &&
  1456. s->left_ref_ctx[row & 7] == s->fixcompref);
  1457. }
  1458. } else {
  1459. c = s->above_comp_ctx[col] ? 3 :
  1460. (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->fixcompref);
  1461. }
  1462. } else if (have_l) {
  1463. c = s->left_comp_ctx[row7] ? 3 :
  1464. (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->fixcompref);
  1465. } else {
  1466. c = 1;
  1467. }
  1468. b->comp = vp56_rac_get_prob(&s->c, s->prob.p.comp[c]);
  1469. s->counts.comp[c][b->comp]++;
  1470. }
  1471. // read actual references
  1472. // FIXME probably cache a few variables here to prevent repetitive
  1473. // memory accesses below
  1474. if (b->comp) /* two references */ {
  1475. int fix_idx = s->signbias[s->fixcompref], var_idx = !fix_idx, c, bit;
  1476. b->ref[fix_idx] = s->fixcompref;
  1477. // FIXME can this codeblob be replaced by some sort of LUT?
  1478. if (have_a) {
  1479. if (have_l) {
  1480. if (s->above_intra_ctx[col]) {
  1481. if (s->left_intra_ctx[row7]) {
  1482. c = 2;
  1483. } else {
  1484. c = 1 + 2 * (s->left_ref_ctx[row7] != s->varcompref[1]);
  1485. }
  1486. } else if (s->left_intra_ctx[row7]) {
  1487. c = 1 + 2 * (s->above_ref_ctx[col] != s->varcompref[1]);
  1488. } else {
  1489. int refl = s->left_ref_ctx[row7], refa = s->above_ref_ctx[col];
  1490. if (refl == refa && refa == s->varcompref[1]) {
  1491. c = 0;
  1492. } else if (!s->left_comp_ctx[row7] && !s->above_comp_ctx[col]) {
  1493. if ((refa == s->fixcompref && refl == s->varcompref[0]) ||
  1494. (refl == s->fixcompref && refa == s->varcompref[0])) {
  1495. c = 4;
  1496. } else {
  1497. c = (refa == refl) ? 3 : 1;
  1498. }
  1499. } else if (!s->left_comp_ctx[row7]) {
  1500. if (refa == s->varcompref[1] && refl != s->varcompref[1]) {
  1501. c = 1;
  1502. } else {
  1503. c = (refl == s->varcompref[1] &&
  1504. refa != s->varcompref[1]) ? 2 : 4;
  1505. }
  1506. } else if (!s->above_comp_ctx[col]) {
  1507. if (refl == s->varcompref[1] && refa != s->varcompref[1]) {
  1508. c = 1;
  1509. } else {
  1510. c = (refa == s->varcompref[1] &&
  1511. refl != s->varcompref[1]) ? 2 : 4;
  1512. }
  1513. } else {
  1514. c = (refl == refa) ? 4 : 2;
  1515. }
  1516. }
  1517. } else {
  1518. if (s->above_intra_ctx[col]) {
  1519. c = 2;
  1520. } else if (s->above_comp_ctx[col]) {
  1521. c = 4 * (s->above_ref_ctx[col] != s->varcompref[1]);
  1522. } else {
  1523. c = 3 * (s->above_ref_ctx[col] != s->varcompref[1]);
  1524. }
  1525. }
  1526. } else if (have_l) {
  1527. if (s->left_intra_ctx[row7]) {
  1528. c = 2;
  1529. } else if (s->left_comp_ctx[row7]) {
  1530. c = 4 * (s->left_ref_ctx[row7] != s->varcompref[1]);
  1531. } else {
  1532. c = 3 * (s->left_ref_ctx[row7] != s->varcompref[1]);
  1533. }
  1534. } else {
  1535. c = 2;
  1536. }
  1537. bit = vp56_rac_get_prob(&s->c, s->prob.p.comp_ref[c]);
  1538. b->ref[var_idx] = s->varcompref[bit];
  1539. s->counts.comp_ref[c][bit]++;
  1540. } else /* single reference */ {
  1541. int bit, c;
  1542. if (have_a && !s->above_intra_ctx[col]) {
  1543. if (have_l && !s->left_intra_ctx[row7]) {
  1544. if (s->left_comp_ctx[row7]) {
  1545. if (s->above_comp_ctx[col]) {
  1546. c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7] ||
  1547. !s->above_ref_ctx[col]);
  1548. } else {
  1549. c = (3 * !s->above_ref_ctx[col]) +
  1550. (!s->fixcompref || !s->left_ref_ctx[row7]);
  1551. }
  1552. } else if (s->above_comp_ctx[col]) {
  1553. c = (3 * !s->left_ref_ctx[row7]) +
  1554. (!s->fixcompref || !s->above_ref_ctx[col]);
  1555. } else {
  1556. c = 2 * !s->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col];
  1557. }
  1560. } else if (s->above_comp_ctx[col]) {
  1561. c = 1 + (!s->fixcompref || !s->above_ref_ctx[col]);
  1562. } else {
  1563. c = 4 * (!s->above_ref_ctx[col]);
  1564. }
  1565. } else if (have_l && !s->left_intra_ctx[row7]) {
1566. if (s->left_comp_ctx[row7]) {
  1569. c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7]);
  1570. } else {
  1571. c = 4 * (!s->left_ref_ctx[row7]);
  1572. }
  1573. } else {
  1574. c = 2;
  1575. }
  1576. bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][0]);
  1577. s->counts.single_ref[c][0][bit]++;
  1578. if (!bit) {
  1579. b->ref[0] = 0;
  1580. } else {
  1581. // FIXME can this codeblob be replaced by some sort of LUT?
  1582. if (have_a) {
  1583. if (have_l) {
  1584. if (s->left_intra_ctx[row7]) {
  1585. if (s->above_intra_ctx[col]) {
  1586. c = 2;
  1587. } else if (s->above_comp_ctx[col]) {
  1588. c = 1 + 2 * (s->fixcompref == 1 ||
  1589. s->above_ref_ctx[col] == 1);
  1590. } else if (!s->above_ref_ctx[col]) {
  1591. c = 3;
  1592. } else {
  1593. c = 4 * (s->above_ref_ctx[col] == 1);
  1594. }
  1595. } else if (s->above_intra_ctx[col]) {
1596. if (s->left_comp_ctx[row7]) {
  1599. c = 1 + 2 * (s->fixcompref == 1 ||
  1600. s->left_ref_ctx[row7] == 1);
  1601. } else if (!s->left_ref_ctx[row7]) {
  1602. c = 3;
  1603. } else {
  1604. c = 4 * (s->left_ref_ctx[row7] == 1);
  1605. }
  1606. } else if (s->above_comp_ctx[col]) {
  1607. if (s->left_comp_ctx[row7]) {
  1608. if (s->left_ref_ctx[row7] == s->above_ref_ctx[col]) {
  1609. c = 3 * (s->fixcompref == 1 ||
  1610. s->left_ref_ctx[row7] == 1);
  1611. } else {
  1612. c = 2;
  1613. }
  1614. } else if (!s->left_ref_ctx[row7]) {
  1615. c = 1 + 2 * (s->fixcompref == 1 ||
  1616. s->above_ref_ctx[col] == 1);
  1617. } else {
  1618. c = 3 * (s->left_ref_ctx[row7] == 1) +
  1619. (s->fixcompref == 1 || s->above_ref_ctx[col] == 1);
  1620. }
  1621. } else if (s->left_comp_ctx[row7]) {
  1622. if (!s->above_ref_ctx[col]) {
  1623. c = 1 + 2 * (s->fixcompref == 1 ||
  1624. s->left_ref_ctx[row7] == 1);
  1625. } else {
  1626. c = 3 * (s->above_ref_ctx[col] == 1) +
  1627. (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1);
  1628. }
  1629. } else if (!s->above_ref_ctx[col]) {
  1630. if (!s->left_ref_ctx[row7]) {
  1631. c = 3;
  1632. } else {
  1633. c = 4 * (s->left_ref_ctx[row7] == 1);
  1634. }
  1635. } else if (!s->left_ref_ctx[row7]) {
  1636. c = 4 * (s->above_ref_ctx[col] == 1);
  1637. } else {
  1638. c = 2 * (s->left_ref_ctx[row7] == 1) +
  1639. 2 * (s->above_ref_ctx[col] == 1);
  1640. }
  1641. } else {
  1642. if (s->above_intra_ctx[col] ||
  1643. (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) {
  1644. c = 2;
  1645. } else if (s->above_comp_ctx[col]) {
  1646. c = 3 * (s->fixcompref == 1 || s->above_ref_ctx[col] == 1);
  1647. } else {
  1648. c = 4 * (s->above_ref_ctx[col] == 1);
  1649. }
  1650. }
  1651. } else if (have_l) {
  1652. if (s->left_intra_ctx[row7] ||
  1653. (!s->left_comp_ctx[row7] && !s->left_ref_ctx[row7])) {
  1654. c = 2;
  1655. } else if (s->left_comp_ctx[row7]) {
  1656. c = 3 * (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1);
  1657. } else {
  1658. c = 4 * (s->left_ref_ctx[row7] == 1);
  1659. }
  1660. } else {
  1661. c = 2;
  1662. }
  1663. bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][1]);
  1664. s->counts.single_ref[c][1][bit]++;
  1665. b->ref[0] = 1 + bit;
  1666. }
  1667. }
  1668. }
  1669. if (b->bs <= BS_8x8) {
  1670. if (s->segmentation.feat[b->seg_id].skip_enabled) {
  1671. b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV;
  1672. } else {
  1673. static const uint8_t off[10] = {
  1674. 3, 0, 0, 1, 0, 0, 0, 0, 0, 0
  1675. };
  1676. // FIXME this needs to use the LUT tables from find_ref_mvs
  1677. // because not all are -1,0/0,-1
  1678. int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]]
  1679. [s->left_mode_ctx[row7 + off[b->bs]]];
  1680. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1681. s->prob.p.mv_mode[c]);
  1682. b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
  1683. s->counts.mv_mode[c][b->mode[0] - 10]++;
  1684. }
  1685. }
  1686. if (s->filtermode == FILTER_SWITCHABLE) {
  1687. int c;
  1688. if (have_a && s->above_mode_ctx[col] >= NEARESTMV) {
  1689. if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
  1690. c = s->above_filter_ctx[col] == s->left_filter_ctx[row7] ?
  1691. s->left_filter_ctx[row7] : 3;
  1692. } else {
  1693. c = s->above_filter_ctx[col];
  1694. }
  1695. } else if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
  1696. c = s->left_filter_ctx[row7];
  1697. } else {
  1698. c = 3;
  1699. }
  1700. filter_id = vp8_rac_get_tree(&s->c, vp9_filter_tree,
  1701. s->prob.p.filter[c]);
  1702. s->counts.filter[c][filter_id]++;
  1703. b->filter = vp9_filter_lut[filter_id];
  1704. } else {
  1705. b->filter = s->filtermode;
  1706. }
  1707. if (b->bs > BS_8x8) {
  1708. int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][s->left_mode_ctx[row7]];
  1709. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1710. s->prob.p.mv_mode[c]);
  1711. s->counts.mv_mode[c][b->mode[0] - 10]++;
  1712. fill_mv(s, b->mv[0], b->mode[0], 0);
  1713. if (b->bs != BS_8x4) {
  1714. b->mode[1] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1715. s->prob.p.mv_mode[c]);
  1716. s->counts.mv_mode[c][b->mode[1] - 10]++;
  1717. fill_mv(s, b->mv[1], b->mode[1], 1);
  1718. } else {
  1719. b->mode[1] = b->mode[0];
  1720. AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
  1721. AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
  1722. }
  1723. if (b->bs != BS_4x8) {
  1724. b->mode[2] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1725. s->prob.p.mv_mode[c]);
  1726. s->counts.mv_mode[c][b->mode[2] - 10]++;
  1727. fill_mv(s, b->mv[2], b->mode[2], 2);
  1728. if (b->bs != BS_8x4) {
  1729. b->mode[3] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1730. s->prob.p.mv_mode[c]);
  1731. s->counts.mv_mode[c][b->mode[3] - 10]++;
  1732. fill_mv(s, b->mv[3], b->mode[3], 3);
  1733. } else {
  1734. b->mode[3] = b->mode[2];
  1735. AV_COPY32(&b->mv[3][0], &b->mv[2][0]);
  1736. AV_COPY32(&b->mv[3][1], &b->mv[2][1]);
  1737. }
  1738. } else {
  1739. b->mode[2] = b->mode[0];
  1740. AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
  1741. AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
  1742. b->mode[3] = b->mode[1];
  1743. AV_COPY32(&b->mv[3][0], &b->mv[1][0]);
  1744. AV_COPY32(&b->mv[3][1], &b->mv[1][1]);
  1745. }
  1746. } else {
  1747. fill_mv(s, b->mv[0], b->mode[0], -1);
  1748. AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
  1749. AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
  1750. AV_COPY32(&b->mv[3][0], &b->mv[0][0]);
  1751. AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
  1752. AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
  1753. AV_COPY32(&b->mv[3][1], &b->mv[0][1]);
  1754. }
  1755. vref = b->ref[b->comp ? s->signbias[s->varcompref[0]] : 0];
  1756. }
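// the SPLAT_CTX/SET_CTXS macros below replicate one context byte across
// n entries of the above/left context arrays with a single aligned store
// (multiplying by 0x0101.. splats the byte into each position); the
// 64-bit variant halves the store count on targets with fast 64-bit writes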
  1757. #if HAVE_FAST_64BIT
  1758. #define SPLAT_CTX(var, val, n) \
  1759. switch (n) { \
  1760. case 1: var = val; break; \
  1761. case 2: AV_WN16A(&var, val * 0x0101); break; \
  1762. case 4: AV_WN32A(&var, val * 0x01010101); break; \
  1763. case 8: AV_WN64A(&var, val * 0x0101010101010101ULL); break; \
  1764. case 16: { \
  1765. uint64_t v64 = val * 0x0101010101010101ULL; \
  1766. AV_WN64A( &var, v64); \
  1767. AV_WN64A(&((uint8_t *) &var)[8], v64); \
  1768. break; \
  1769. } \
  1770. }
  1771. #else
  1772. #define SPLAT_CTX(var, val, n) \
  1773. switch (n) { \
  1774. case 1: var = val; break; \
  1775. case 2: AV_WN16A(&var, val * 0x0101); break; \
  1776. case 4: AV_WN32A(&var, val * 0x01010101); break; \
  1777. case 8: { \
  1778. uint32_t v32 = val * 0x01010101; \
  1779. AV_WN32A( &var, v32); \
  1780. AV_WN32A(&((uint8_t *) &var)[4], v32); \
  1781. break; \
  1782. } \
  1783. case 16: { \
  1784. uint32_t v32 = val * 0x01010101; \
  1785. AV_WN32A( &var, v32); \
  1786. AV_WN32A(&((uint8_t *) &var)[4], v32); \
  1787. AV_WN32A(&((uint8_t *) &var)[8], v32); \
  1788. AV_WN32A(&((uint8_t *) &var)[12], v32); \
  1789. break; \
  1790. } \
  1791. }
  1792. #endif
  1793. switch (bwh_tab[1][b->bs][0]) {
  1794. #define SET_CTXS(dir, off, n) \
  1795. do { \
  1796. SPLAT_CTX(s->dir##_skip_ctx[off], b->skip, n); \
  1797. SPLAT_CTX(s->dir##_txfm_ctx[off], b->tx, n); \
  1798. SPLAT_CTX(s->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
  1799. if (!s->keyframe && !s->intraonly) { \
  1800. SPLAT_CTX(s->dir##_intra_ctx[off], b->intra, n); \
  1801. SPLAT_CTX(s->dir##_comp_ctx[off], b->comp, n); \
  1802. SPLAT_CTX(s->dir##_mode_ctx[off], b->mode[3], n); \
  1803. if (!b->intra) { \
  1804. SPLAT_CTX(s->dir##_ref_ctx[off], vref, n); \
  1805. if (s->filtermode == FILTER_SWITCHABLE) { \
  1806. SPLAT_CTX(s->dir##_filter_ctx[off], filter_id, n); \
  1807. } \
  1808. } \
  1809. } \
  1810. } while (0)
  1811. case 1: SET_CTXS(above, col, 1); break;
  1812. case 2: SET_CTXS(above, col, 2); break;
  1813. case 4: SET_CTXS(above, col, 4); break;
  1814. case 8: SET_CTXS(above, col, 8); break;
  1815. }
  1816. switch (bwh_tab[1][b->bs][1]) {
  1817. case 1: SET_CTXS(left, row7, 1); break;
  1818. case 2: SET_CTXS(left, row7, 2); break;
  1819. case 4: SET_CTXS(left, row7, 4); break;
  1820. case 8: SET_CTXS(left, row7, 8); break;
  1821. }
  1822. #undef SPLAT_CTX
  1823. #undef SET_CTXS
  1824. if (!s->keyframe && !s->intraonly) {
  1825. if (b->bs > BS_8x8) {
  1826. int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
  1827. AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][0], &b->mv[1][0]);
  1828. AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][1], &b->mv[1][1]);
  1829. AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][0], mv0);
  1830. AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][1], mv1);
  1831. AV_COPY32(&s->above_mv_ctx[col * 2 + 0][0], &b->mv[2][0]);
  1832. AV_COPY32(&s->above_mv_ctx[col * 2 + 0][1], &b->mv[2][1]);
  1833. AV_WN32A(&s->above_mv_ctx[col * 2 + 1][0], mv0);
  1834. AV_WN32A(&s->above_mv_ctx[col * 2 + 1][1], mv1);
  1835. } else {
  1836. int n, mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
  1837. for (n = 0; n < w4 * 2; n++) {
  1838. AV_WN32A(&s->above_mv_ctx[col * 2 + n][0], mv0);
  1839. AV_WN32A(&s->above_mv_ctx[col * 2 + n][1], mv1);
  1840. }
  1841. for (n = 0; n < h4 * 2; n++) {
  1842. AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][0], mv0);
  1843. AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][1], mv1);
  1844. }
  1845. }
  1846. }
  1847. // FIXME kinda ugly
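// store the block's final ref[]/mv[] pair into the frame-wide per-8x8-unit
// buffer, to be read back for temporal MV prediction on following frames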
  1848. for (y = 0; y < h4; y++) {
  1849. int x, o = (row + y) * s->sb_cols * 8 + col;
  1850. struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[o];
  1851. if (b->intra) {
  1852. for (x = 0; x < w4; x++) {
  1853. mv[x].ref[0] =
  1854. mv[x].ref[1] = -1;
  1855. }
  1856. } else if (b->comp) {
  1857. for (x = 0; x < w4; x++) {
  1858. mv[x].ref[0] = b->ref[0];
  1859. mv[x].ref[1] = b->ref[1];
  1860. AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
  1861. AV_COPY32(&mv[x].mv[1], &b->mv[3][1]);
  1862. }
  1863. } else {
  1864. for (x = 0; x < w4; x++) {
  1865. mv[x].ref[0] = b->ref[0];
  1866. mv[x].ref[1] = -1;
  1867. AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
  1868. }
  1869. }
  1870. }
  1871. }
  1872. // FIXME merge cnt/eob arguments?
  1873. static av_always_inline int
  1874. decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
  1875. int is_tx32x32, unsigned (*cnt)[6][3],
  1876. unsigned (*eob)[6][2], uint8_t (*p)[6][11],
  1877. int nnz, const int16_t *scan, const int16_t (*nb)[2],
  1878. const int16_t *band_counts, const int16_t *qmul)
  1879. {
  1880. int i = 0, band = 0, band_left = band_counts[band];
  1881. uint8_t *tp = p[0][nnz];
  1882. uint8_t cache[1024];
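// per coefficient, decode an EOB flag, a zero flag, and then the magnitude
// class (one, 2-4, cat1/2 or cat3-6) whose extra bits use fixed
// probabilities; cache[] holds a small magnitude class per position so the
// nonzero context of later coefficients can be derived from their two
// already-decoded scan neighbours in nb[]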
  1883. do {
  1884. int val, rc;
  1885. val = vp56_rac_get_prob_branchy(c, tp[0]); // eob
  1886. eob[band][nnz][val]++;
  1887. if (!val)
  1888. break;
  1889. skip_eob:
  1890. if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
  1891. cnt[band][nnz][0]++;
  1892. if (!--band_left)
  1893. band_left = band_counts[++band];
  1894. cache[scan[i]] = 0;
  1895. nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
  1896. tp = p[band][nnz];
  1897. if (++i == n_coeffs)
1898. break; // invalid input; blocks should end with an EOB
  1899. goto skip_eob;
  1900. }
  1901. rc = scan[i];
  1902. if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one
  1903. cnt[band][nnz][1]++;
  1904. val = 1;
  1905. cache[rc] = 1;
  1906. } else {
  1907. // fill in p[3-10] (model fill) - only once per frame for each pos
  1908. if (!tp[3])
  1909. memcpy(&tp[3], vp9_model_pareto8[tp[2]], 8);
  1910. cnt[band][nnz][2]++;
  1911. if (!vp56_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
  1912. if (!vp56_rac_get_prob_branchy(c, tp[4])) {
  1913. cache[rc] = val = 2;
  1914. } else {
  1915. val = 3 + vp56_rac_get_prob(c, tp[5]);
  1916. cache[rc] = 3;
  1917. }
  1918. } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
  1919. cache[rc] = 4;
  1920. if (!vp56_rac_get_prob_branchy(c, tp[7])) {
  1921. val = 5 + vp56_rac_get_prob(c, 159);
  1922. } else {
  1923. val = 7 + (vp56_rac_get_prob(c, 165) << 1);
  1924. val += vp56_rac_get_prob(c, 145);
  1925. }
  1926. } else { // cat 3-6
  1927. cache[rc] = 5;
  1928. if (!vp56_rac_get_prob_branchy(c, tp[8])) {
  1929. if (!vp56_rac_get_prob_branchy(c, tp[9])) {
  1930. val = 11 + (vp56_rac_get_prob(c, 173) << 2);
  1931. val += (vp56_rac_get_prob(c, 148) << 1);
  1932. val += vp56_rac_get_prob(c, 140);
  1933. } else {
  1934. val = 19 + (vp56_rac_get_prob(c, 176) << 3);
  1935. val += (vp56_rac_get_prob(c, 155) << 2);
  1936. val += (vp56_rac_get_prob(c, 140) << 1);
  1937. val += vp56_rac_get_prob(c, 135);
  1938. }
  1939. } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
  1940. val = 35 + (vp56_rac_get_prob(c, 180) << 4);
  1941. val += (vp56_rac_get_prob(c, 157) << 3);
  1942. val += (vp56_rac_get_prob(c, 141) << 2);
  1943. val += (vp56_rac_get_prob(c, 134) << 1);
  1944. val += vp56_rac_get_prob(c, 130);
  1945. } else {
  1946. val = 67 + (vp56_rac_get_prob(c, 254) << 13);
  1947. val += (vp56_rac_get_prob(c, 254) << 12);
  1948. val += (vp56_rac_get_prob(c, 254) << 11);
  1949. val += (vp56_rac_get_prob(c, 252) << 10);
  1950. val += (vp56_rac_get_prob(c, 249) << 9);
  1951. val += (vp56_rac_get_prob(c, 243) << 8);
  1952. val += (vp56_rac_get_prob(c, 230) << 7);
  1953. val += (vp56_rac_get_prob(c, 196) << 6);
  1954. val += (vp56_rac_get_prob(c, 177) << 5);
  1955. val += (vp56_rac_get_prob(c, 153) << 4);
  1956. val += (vp56_rac_get_prob(c, 140) << 3);
  1957. val += (vp56_rac_get_prob(c, 133) << 2);
  1958. val += (vp56_rac_get_prob(c, 130) << 1);
  1959. val += vp56_rac_get_prob(c, 129);
  1960. }
  1961. }
  1962. }
  1963. if (!--band_left)
  1964. band_left = band_counts[++band];
  1965. if (is_tx32x32)
  1966. coef[rc] = ((vp8_rac_get(c) ? -val : val) * qmul[!!i]) / 2;
  1967. else
  1968. coef[rc] = (vp8_rac_get(c) ? -val : val) * qmul[!!i];
  1969. nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
  1970. tp = p[band][nnz];
  1971. } while (++i < n_coeffs);
  1972. return i;
  1973. }
  1974. static int decode_coeffs_b(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
  1975. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  1976. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  1977. const int16_t (*nb)[2], const int16_t *band_counts,
  1978. const int16_t *qmul)
  1979. {
  1980. return decode_coeffs_b_generic(c, coef, n_coeffs, 0, cnt, eob, p,
  1981. nnz, scan, nb, band_counts, qmul);
  1982. }
  1983. static int decode_coeffs_b32(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
  1984. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  1985. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  1986. const int16_t (*nb)[2], const int16_t *band_counts,
  1987. const int16_t *qmul)
  1988. {
  1989. return decode_coeffs_b_generic(c, coef, n_coeffs, 1, cnt, eob, p,
  1990. nnz, scan, nb, band_counts, qmul);
  1991. }
  1992. static void decode_coeffs(AVCodecContext *ctx)
  1993. {
  1994. VP9Context *s = ctx->priv_data;
  1995. VP9Block *b = s->b;
  1996. int row = s->row, col = s->col;
  1997. uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
  1998. unsigned (*c)[6][3] = s->counts.coef[b->tx][0 /* y */][!b->intra];
  1999. unsigned (*e)[6][2] = s->counts.eob[b->tx][0 /* y */][!b->intra];
  2000. int w4 = bwh_tab[1][b->bs][0] << 1, h4 = bwh_tab[1][b->bs][1] << 1;
  2001. int end_x = FFMIN(2 * (s->cols - col), w4);
  2002. int end_y = FFMIN(2 * (s->rows - row), h4);
  2003. int n, pl, x, y, res;
  2004. int16_t (*qmul)[2] = s->segmentation.feat[b->seg_id].qmul;
  2005. int tx = 4 * s->lossless + b->tx;
  2006. const int16_t * const *yscans = vp9_scans[tx];
  2007. const int16_t (* const *ynbs)[2] = vp9_scans_nb[tx];
  2008. const int16_t *uvscan = vp9_scans[b->uvtx][DCT_DCT];
  2009. const int16_t (*uvnb)[2] = vp9_scans_nb[b->uvtx][DCT_DCT];
  2010. uint8_t *a = &s->above_y_nnz_ctx[col * 2];
  2011. uint8_t *l = &s->left_y_nnz_ctx[(row & 7) << 1];
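// each tx size groups its coefficients into 6 probability bands; the
// per-band counts below sum to the block's coefficient count
// (16, 64, 256 or 1024)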
  2012. static const int16_t band_counts[4][8] = {
  2013. { 1, 2, 3, 4, 3, 16 - 13 },
  2014. { 1, 2, 3, 4, 11, 64 - 21 },
  2015. { 1, 2, 3, 4, 11, 256 - 21 },
  2016. { 1, 2, 3, 4, 11, 1024 - 21 },
  2017. };
  2018. const int16_t *y_band_counts = band_counts[b->tx];
  2019. const int16_t *uv_band_counts = band_counts[b->uvtx];
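// MERGE collapses the per-4x4 nnz context entries covered by one larger
// transform into a single 0/1 value before decoding; SPLAT expands the
// resulting context back over all covered entries afterwards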
  2020. #define MERGE(la, end, step, rd) \
  2021. for (n = 0; n < end; n += step) \
  2022. la[n] = !!rd(&la[n])
  2023. #define MERGE_CTX(step, rd) \
  2024. do { \
  2025. MERGE(l, end_y, step, rd); \
  2026. MERGE(a, end_x, step, rd); \
  2027. } while (0)
  2028. #define DECODE_Y_COEF_LOOP(step, mode_index, v) \
  2029. for (n = 0, y = 0; y < end_y; y += step) { \
  2030. for (x = 0; x < end_x; x += step, n += step * step) { \
  2031. enum TxfmType txtp = vp9_intra_txfm_type[b->mode[mode_index]]; \
  2032. res = decode_coeffs_b##v(&s->c, s->block + 16 * n, 16 * step * step, \
  2033. c, e, p, a[x] + l[y], yscans[txtp], \
  2034. ynbs[txtp], y_band_counts, qmul[0]); \
  2035. a[x] = l[y] = !!res; \
  2036. if (step >= 4) { \
  2037. AV_WN16A(&s->eob[n], res); \
  2038. } else { \
  2039. s->eob[n] = res; \
  2040. } \
  2041. } \
  2042. }
  2043. #define SPLAT(la, end, step, cond) \
  2044. if (step == 2) { \
  2045. for (n = 1; n < end; n += step) \
  2046. la[n] = la[n - 1]; \
  2047. } else if (step == 4) { \
  2048. if (cond) { \
  2049. for (n = 0; n < end; n += step) \
  2050. AV_WN32A(&la[n], la[n] * 0x01010101); \
  2051. } else { \
  2052. for (n = 0; n < end; n += step) \
  2053. memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
  2054. } \
  2055. } else /* step == 8 */ { \
  2056. if (cond) { \
  2057. if (HAVE_FAST_64BIT) { \
  2058. for (n = 0; n < end; n += step) \
  2059. AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
  2060. } else { \
  2061. for (n = 0; n < end; n += step) { \
  2062. uint32_t v32 = la[n] * 0x01010101; \
  2063. AV_WN32A(&la[n], v32); \
  2064. AV_WN32A(&la[n + 4], v32); \
  2065. } \
  2066. } \
  2067. } else { \
  2068. for (n = 0; n < end; n += step) \
  2069. memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
  2070. } \
  2071. }
  2072. #define SPLAT_CTX(step) \
  2073. do { \
  2074. SPLAT(a, end_x, step, end_x == w4); \
  2075. SPLAT(l, end_y, step, end_y == h4); \
  2076. } while (0)
  2077. /* y tokens */
  2078. switch (b->tx) {
  2079. case TX_4X4:
  2080. DECODE_Y_COEF_LOOP(1, b->bs > BS_8x8 ? n : 0,);
  2081. break;
  2082. case TX_8X8:
  2083. MERGE_CTX(2, AV_RN16A);
  2084. DECODE_Y_COEF_LOOP(2, 0,);
  2085. SPLAT_CTX(2);
  2086. break;
  2087. case TX_16X16:
  2088. MERGE_CTX(4, AV_RN32A);
  2089. DECODE_Y_COEF_LOOP(4, 0,);
  2090. SPLAT_CTX(4);
  2091. break;
  2092. case TX_32X32:
  2093. MERGE_CTX(8, AV_RN64A);
  2094. DECODE_Y_COEF_LOOP(8, 0, 32);
  2095. SPLAT_CTX(8);
  2096. break;
  2097. }
  2098. #define DECODE_UV_COEF_LOOP(step) \
  2099. for (n = 0, y = 0; y < end_y; y += step) { \
  2100. for (x = 0; x < end_x; x += step, n += step * step) { \
  2101. res = decode_coeffs_b(&s->c, s->uvblock[pl] + 16 * n, \
  2102. 16 * step * step, c, e, p, a[x] + l[y], \
  2103. uvscan, uvnb, uv_band_counts, qmul[1]); \
  2104. a[x] = l[y] = !!res; \
  2105. if (step >= 4) { \
  2106. AV_WN16A(&s->uveob[pl][n], res); \
  2107. } else { \
  2108. s->uveob[pl][n] = res; \
  2109. } \
  2110. } \
  2111. }
  2112. p = s->prob.coef[b->uvtx][1 /* uv */][!b->intra];
  2113. c = s->counts.coef[b->uvtx][1 /* uv */][!b->intra];
  2114. e = s->counts.eob[b->uvtx][1 /* uv */][!b->intra];
  2115. w4 >>= 1;
  2116. h4 >>= 1;
  2117. end_x >>= 1;
  2118. end_y >>= 1;
  2119. for (pl = 0; pl < 2; pl++) {
  2120. a = &s->above_uv_nnz_ctx[pl][col];
  2121. l = &s->left_uv_nnz_ctx[pl][row & 7];
  2122. switch (b->uvtx) {
  2123. case TX_4X4:
  2124. DECODE_UV_COEF_LOOP(1);
  2125. break;
  2126. case TX_8X8:
  2127. MERGE_CTX(2, AV_RN16A);
  2128. DECODE_UV_COEF_LOOP(2);
  2129. SPLAT_CTX(2);
  2130. break;
  2131. case TX_16X16:
  2132. MERGE_CTX(4, AV_RN32A);
  2133. DECODE_UV_COEF_LOOP(4);
  2134. SPLAT_CTX(4);
  2135. break;
  2136. case TX_32X32:
  2137. MERGE_CTX(8, AV_RN64A);
2138. // a 64x64 (max) luma block maps to at most a 32x32 uv block, which can
2139. // only ever contain a single tx32x32 block, so there is no need to loop
  2140. res = decode_coeffs_b32(&s->c, s->uvblock[pl],
  2141. 1024, c, e, p, a[0] + l[0],
  2142. uvscan, uvnb, uv_band_counts, qmul[1]);
  2143. a[0] = l[0] = !!res;
  2144. AV_WN16A(&s->uveob[pl][0], res);
  2145. SPLAT_CTX(8);
  2146. break;
  2147. }
  2148. }
  2149. }
  2150. static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **a,
  2151. uint8_t *dst_edge, ptrdiff_t stride_edge,
  2152. uint8_t *dst_inner, ptrdiff_t stride_inner,
  2153. uint8_t *l, int col, int x, int w,
  2154. int row, int y, enum TxfmMode tx,
  2155. int p)
  2156. {
  2157. int have_top = row > 0 || y > 0;
  2158. int have_left = col > s->tiling.tile_col_start || x > 0;
  2159. int have_right = x < w - 1;
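// mode_conv[] swaps in an equivalent predictor when the top and/or left
// neighbours are unavailable (e.g. DC prediction with a fixed 127/128/129
// fill), and edges[] lists which neighbouring samples each predictor
// actually needs, so only those are gathered below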
  2160. static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
  2161. [VERT_PRED] = { { DC_127_PRED, VERT_PRED },
  2162. { DC_127_PRED, VERT_PRED } },
  2163. [HOR_PRED] = { { DC_129_PRED, DC_129_PRED },
  2164. { HOR_PRED, HOR_PRED } },
  2165. [DC_PRED] = { { DC_128_PRED, TOP_DC_PRED },
  2166. { LEFT_DC_PRED, DC_PRED } },
  2167. [DIAG_DOWN_LEFT_PRED] = { { DC_127_PRED, DIAG_DOWN_LEFT_PRED },
  2168. { DC_127_PRED, DIAG_DOWN_LEFT_PRED } },
  2169. [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
  2170. { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
  2171. [VERT_RIGHT_PRED] = { { VERT_RIGHT_PRED, VERT_RIGHT_PRED },
  2172. { VERT_RIGHT_PRED, VERT_RIGHT_PRED } },
  2173. [HOR_DOWN_PRED] = { { HOR_DOWN_PRED, HOR_DOWN_PRED },
  2174. { HOR_DOWN_PRED, HOR_DOWN_PRED } },
  2175. [VERT_LEFT_PRED] = { { DC_127_PRED, VERT_LEFT_PRED },
  2176. { DC_127_PRED, VERT_LEFT_PRED } },
  2177. [HOR_UP_PRED] = { { DC_129_PRED, DC_129_PRED },
  2178. { HOR_UP_PRED, HOR_UP_PRED } },
  2179. [TM_VP8_PRED] = { { DC_129_PRED, VERT_PRED },
  2180. { HOR_PRED, TM_VP8_PRED } },
  2181. };
  2182. static const struct {
  2183. uint8_t needs_left:1;
  2184. uint8_t needs_top:1;
  2185. uint8_t needs_topleft:1;
  2186. uint8_t needs_topright:1;
  2187. uint8_t invert_left:1;
  2188. } edges[N_INTRA_PRED_MODES] = {
  2189. [VERT_PRED] = { .needs_top = 1 },
  2190. [HOR_PRED] = { .needs_left = 1 },
  2191. [DC_PRED] = { .needs_top = 1, .needs_left = 1 },
  2192. [DIAG_DOWN_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
  2193. [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2194. [VERT_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2195. [HOR_DOWN_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2196. [VERT_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
  2197. [HOR_UP_PRED] = { .needs_left = 1, .invert_left = 1 },
  2198. [TM_VP8_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2199. [LEFT_DC_PRED] = { .needs_left = 1 },
  2200. [TOP_DC_PRED] = { .needs_top = 1 },
  2201. [DC_128_PRED] = { 0 },
  2202. [DC_127_PRED] = { 0 },
  2203. [DC_129_PRED] = { 0 }
  2204. };
  2205. av_assert2(mode >= 0 && mode < 10);
  2206. mode = mode_conv[mode][have_left][have_top];
  2207. if (edges[mode].needs_top) {
  2208. uint8_t *top, *topleft;
  2209. int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !p) - x) * 4;
  2210. int n_px_need_tr = 0;
  2211. if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
  2212. n_px_need_tr = 4;
  2213. // if top of sb64-row, use s->intra_pred_data[] instead of
  2214. // dst[-stride] for intra prediction (it contains pre- instead of
  2215. // post-loopfilter data)
  2216. if (have_top) {
  2217. top = !(row & 7) && !y ?
  2218. s->intra_pred_data[p] + col * (8 >> !!p) + x * 4 :
  2219. y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
  2220. if (have_left)
  2221. topleft = !(row & 7) && !y ?
  2222. s->intra_pred_data[p] + col * (8 >> !!p) + x * 4 :
  2223. y == 0 || x == 0 ? &dst_edge[-stride_edge] :
  2224. &dst_inner[-stride_inner];
  2225. }
  2226. if (have_top &&
  2227. (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
  2228. (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
  2229. n_px_need + n_px_need_tr <= n_px_have) {
  2230. *a = top;
  2231. } else {
  2232. if (have_top) {
  2233. if (n_px_need <= n_px_have) {
  2234. memcpy(*a, top, n_px_need);
  2235. } else {
  2236. memcpy(*a, top, n_px_have);
  2237. memset(&(*a)[n_px_have], (*a)[n_px_have - 1],
  2238. n_px_need - n_px_have);
  2239. }
  2240. } else {
  2241. memset(*a, 127, n_px_need);
  2242. }
  2243. if (edges[mode].needs_topleft) {
  2244. if (have_left && have_top) {
  2245. (*a)[-1] = topleft[-1];
  2246. } else {
  2247. (*a)[-1] = have_top ? 129 : 127;
  2248. }
  2249. }
  2250. if (tx == TX_4X4 && edges[mode].needs_topright) {
  2251. if (have_top && have_right &&
  2252. n_px_need + n_px_need_tr <= n_px_have) {
  2253. memcpy(&(*a)[4], &top[4], 4);
  2254. } else {
  2255. memset(&(*a)[4], (*a)[3], 4);
  2256. }
  2257. }
  2258. }
  2259. }
  2260. if (edges[mode].needs_left) {
  2261. if (have_left) {
  2262. int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !p) - y) * 4;
  2263. uint8_t *dst = x == 0 ? dst_edge : dst_inner;
  2264. ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;
  2265. if (edges[mode].invert_left) {
  2266. if (n_px_need <= n_px_have) {
  2267. for (i = 0; i < n_px_need; i++)
  2268. l[i] = dst[i * stride - 1];
  2269. } else {
  2270. for (i = 0; i < n_px_have; i++)
  2271. l[i] = dst[i * stride - 1];
  2272. memset(&l[n_px_have], l[n_px_have - 1], n_px_need - n_px_have);
  2273. }
  2274. } else {
  2275. if (n_px_need <= n_px_have) {
  2276. for (i = 0; i < n_px_need; i++)
  2277. l[n_px_need - 1 - i] = dst[i * stride - 1];
  2278. } else {
  2279. for (i = 0; i < n_px_have; i++)
  2280. l[n_px_need - 1 - i] = dst[i * stride - 1];
  2281. memset(l, l[n_px_need - n_px_have], n_px_need - n_px_have);
  2282. }
  2283. }
  2284. } else {
  2285. memset(l, 129, 4 << tx);
  2286. }
  2287. }
  2288. return mode;
  2289. }
  2290. static void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, ptrdiff_t uv_off)
  2291. {
  2292. VP9Context *s = ctx->priv_data;
  2293. VP9Block *b = s->b;
  2294. int row = s->row, col = s->col;
  2295. int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
  2296. int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
  2297. int end_x = FFMIN(2 * (s->cols - col), w4);
  2298. int end_y = FFMIN(2 * (s->rows - row), h4);
  2299. int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless;
  2300. int uvstep1d = 1 << b->uvtx, p;
  2301. uint8_t *dst = s->dst[0], *dst_r = s->frames[CUR_FRAME].tf.f->data[0] + y_off;
  2302. LOCAL_ALIGNED_32(uint8_t, a_buf, [64]);
  2303. LOCAL_ALIGNED_32(uint8_t, l, [32]);
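// a points 32 bytes into a_buf so that it stays 32-byte aligned and a[-1]
// (the top-left sample) is addressable; the 32 bytes after it fit the top
// edge of the largest transform (4 << TX_32X32), or 4 top plus 4 top-right
// samples in the 4x4 case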
  2304. for (n = 0, y = 0; y < end_y; y += step1d) {
  2305. uint8_t *ptr = dst, *ptr_r = dst_r;
  2306. for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d,
  2307. ptr_r += 4 * step1d, n += step) {
  2308. int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
  2309. y * 2 + x : 0];
  2310. uint8_t *a = &a_buf[32];
  2311. enum TxfmType txtp = vp9_intra_txfm_type[mode];
  2312. int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
  2313. mode = check_intra_mode(s, mode, &a, ptr_r,
  2314. s->frames[CUR_FRAME].tf.f->linesize[0],
  2315. ptr, s->y_stride, l,
  2316. col, x, w4, row, y, b->tx, 0);
  2317. s->dsp.intra_pred[b->tx][mode](ptr, s->y_stride, l, a);
  2318. if (eob)
  2319. s->dsp.itxfm_add[tx][txtp](ptr, s->y_stride,
  2320. s->block + 16 * n, eob);
  2321. }
  2322. dst_r += 4 * step1d * s->frames[CUR_FRAME].tf.f->linesize[0];
  2323. dst += 4 * step1d * s->y_stride;
  2324. }
  2325. // U/V
  2326. w4 >>= 1;
  2327. end_x >>= 1;
  2328. end_y >>= 1;
  2329. step = 1 << (b->uvtx * 2);
  2330. for (p = 0; p < 2; p++) {
  2331. dst = s->dst[1 + p];
  2332. dst_r = s->frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
  2333. for (n = 0, y = 0; y < end_y; y += uvstep1d) {
  2334. uint8_t *ptr = dst, *ptr_r = dst_r;
  2335. for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d,
  2336. ptr_r += 4 * uvstep1d, n += step) {
  2337. int mode = b->uvmode;
  2338. uint8_t *a = &a_buf[32];
  2339. int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
  2340. mode = check_intra_mode(s, mode, &a, ptr_r,
  2341. s->frames[CUR_FRAME].tf.f->linesize[1],
  2342. ptr, s->uv_stride, l,
  2343. col, x, w4, row, y, b->uvtx, p + 1);
  2344. s->dsp.intra_pred[b->uvtx][mode](ptr, s->uv_stride, l, a);
  2345. if (eob)
  2346. s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
  2347. s->uvblock[p] + 16 * n, eob);
  2348. }
  2349. dst_r += 4 * uvstep1d * s->frames[CUR_FRAME].tf.f->linesize[1];
  2350. dst += 4 * uvstep1d * s->uv_stride;
  2351. }
  2352. }
  2353. }
  2354. static av_always_inline void mc_luma_dir(VP9Context *s, vp9_mc_func (*mc)[2],
  2355. uint8_t *dst, ptrdiff_t dst_stride,
  2356. const uint8_t *ref, ptrdiff_t ref_stride,
  2357. ThreadFrame *ref_frame,
  2358. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2359. int bw, int bh, int w, int h)
  2360. {
  2361. int mx = mv->x, my = mv->y, th;
  2362. y += my >> 3;
  2363. x += mx >> 3;
  2364. ref += y * ref_stride + x;
  2365. mx &= 7;
  2366. my &= 7;
  2367. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2368. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2369. // the longest loopfilter of the next sbrow
  2370. th = (y + bh + 4 * !!my + 7) >> 6;
  2371. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
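// the 8-tap subpel filters read 3 pixels before and 4 pixels after the
// block in each filtered dimension; if that area crosses the frame edge,
// build a padded copy of the reference block (80-byte stride) and filter
// from that instead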
  2372. if (x < !!mx * 3 || y < !!my * 3 ||
  2373. x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
  2374. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2375. ref - !!my * 3 * ref_stride - !!mx * 3,
  2376. 80, ref_stride,
  2377. bw + !!mx * 7, bh + !!my * 7,
  2378. x - !!mx * 3, y - !!my * 3, w, h);
  2379. ref = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
  2380. ref_stride = 80;
  2381. }
  2382. mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
  2383. }
  2384. static av_always_inline void mc_chroma_dir(VP9Context *s, vp9_mc_func (*mc)[2],
  2385. uint8_t *dst_u, uint8_t *dst_v,
  2386. ptrdiff_t dst_stride,
  2387. const uint8_t *ref_u, ptrdiff_t src_stride_u,
  2388. const uint8_t *ref_v, ptrdiff_t src_stride_v,
  2389. ThreadFrame *ref_frame,
  2390. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2391. int bw, int bh, int w, int h)
  2392. {
  2393. int mx = mv->x, my = mv->y, th;
  2394. y += my >> 4;
  2395. x += mx >> 4;
  2396. ref_u += y * src_stride_u + x;
  2397. ref_v += y * src_stride_v + x;
  2398. mx &= 15;
  2399. my &= 15;
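// the luma MV is in 1/8-pel units; on the 2x subsampled chroma planes the
// same vector has 1/16-pel resolution, hence the >> 4 and & 15 above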
  2400. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2401. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2402. // the longest loopfilter of the next sbrow
  2403. th = (y + bh + 4 * !!my + 7) >> 5;
  2404. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2405. if (x < !!mx * 3 || y < !!my * 3 ||
  2406. x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
  2407. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2408. ref_u - !!my * 3 * src_stride_u - !!mx * 3,
  2409. 80, src_stride_u,
  2410. bw + !!mx * 7, bh + !!my * 7,
  2411. x - !!mx * 3, y - !!my * 3, w, h);
  2412. ref_u = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
  2413. mc[!!mx][!!my](dst_u, dst_stride, ref_u, 80, bh, mx, my);
  2414. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2415. ref_v - !!my * 3 * src_stride_v - !!mx * 3,
  2416. 80, src_stride_v,
  2417. bw + !!mx * 7, bh + !!my * 7,
  2418. x - !!mx * 3, y - !!my * 3, w, h);
  2419. ref_v = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
  2420. mc[!!mx][!!my](dst_v, dst_stride, ref_v, 80, bh, mx, my);
  2421. } else {
  2422. mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
  2423. mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
  2424. }
  2425. }
  2426. static void inter_recon(AVCodecContext *ctx)
  2427. {
  2428. static const uint8_t bwlog_tab[2][N_BS_SIZES] = {
  2429. { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4 },
  2430. { 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4 },
  2431. };
  2432. VP9Context *s = ctx->priv_data;
  2433. VP9Block *b = s->b;
  2434. int row = s->row, col = s->col;
  2435. ThreadFrame *tref1 = &s->refs[s->refidx[b->ref[0]]], *tref2;
  2436. AVFrame *ref1 = tref1->f, *ref2;
  2437. int w1 = ref1->width, h1 = ref1->height, w2, h2;
  2438. ptrdiff_t ls_y = s->y_stride, ls_uv = s->uv_stride;
  2439. if (b->comp) {
  2440. tref2 = &s->refs[s->refidx[b->ref[1]]];
  2441. ref2 = tref2->f;
  2442. w2 = ref2->width;
  2443. h2 = ref2->height;
  2444. }
  2445. // y inter pred
  2446. if (b->bs > BS_8x8) {
  2447. if (b->bs == BS_8x4) {
  2448. mc_luma_dir(s, s->dsp.mc[3][b->filter][0], s->dst[0], ls_y,
  2449. ref1->data[0], ref1->linesize[0], tref1,
  2450. row << 3, col << 3, &b->mv[0][0], 8, 4, w1, h1);
  2451. mc_luma_dir(s, s->dsp.mc[3][b->filter][0],
  2452. s->dst[0] + 4 * ls_y, ls_y,
  2453. ref1->data[0], ref1->linesize[0], tref1,
  2454. (row << 3) + 4, col << 3, &b->mv[2][0], 8, 4, w1, h1);
  2455. if (b->comp) {
  2456. mc_luma_dir(s, s->dsp.mc[3][b->filter][1], s->dst[0], ls_y,
  2457. ref2->data[0], ref2->linesize[0], tref2,
  2458. row << 3, col << 3, &b->mv[0][1], 8, 4, w2, h2);
  2459. mc_luma_dir(s, s->dsp.mc[3][b->filter][1],
  2460. s->dst[0] + 4 * ls_y, ls_y,
  2461. ref2->data[0], ref2->linesize[0], tref2,
  2462. (row << 3) + 4, col << 3, &b->mv[2][1], 8, 4, w2, h2);
  2463. }
  2464. } else if (b->bs == BS_4x8) {
  2465. mc_luma_dir(s, s->dsp.mc[4][b->filter][0], s->dst[0], ls_y,
  2466. ref1->data[0], ref1->linesize[0], tref1,
  2467. row << 3, col << 3, &b->mv[0][0], 4, 8, w1, h1);
  2468. mc_luma_dir(s, s->dsp.mc[4][b->filter][0], s->dst[0] + 4, ls_y,
  2469. ref1->data[0], ref1->linesize[0], tref1,
  2470. row << 3, (col << 3) + 4, &b->mv[1][0], 4, 8, w1, h1);
  2471. if (b->comp) {
  2472. mc_luma_dir(s, s->dsp.mc[4][b->filter][1], s->dst[0], ls_y,
  2473. ref2->data[0], ref2->linesize[0], tref2,
  2474. row << 3, col << 3, &b->mv[0][1], 4, 8, w2, h2);
  2475. mc_luma_dir(s, s->dsp.mc[4][b->filter][1], s->dst[0] + 4, ls_y,
  2476. ref2->data[0], ref2->linesize[0], tref2,
  2477. row << 3, (col << 3) + 4, &b->mv[1][1], 4, 8, w2, h2);
  2478. }
  2479. } else {
  2480. av_assert2(b->bs == BS_4x4);
  2481. // FIXME if two horizontally adjacent blocks have the same MV,
  2482. // do a w8 instead of a w4 call
  2483. mc_luma_dir(s, s->dsp.mc[4][b->filter][0], s->dst[0], ls_y,
  2484. ref1->data[0], ref1->linesize[0], tref1,
  2485. row << 3, col << 3, &b->mv[0][0], 4, 4, w1, h1);
  2486. mc_luma_dir(s, s->dsp.mc[4][b->filter][0], s->dst[0] + 4, ls_y,
  2487. ref1->data[0], ref1->linesize[0], tref1,
  2488. row << 3, (col << 3) + 4, &b->mv[1][0], 4, 4, w1, h1);
  2489. mc_luma_dir(s, s->dsp.mc[4][b->filter][0],
  2490. s->dst[0] + 4 * ls_y, ls_y,
  2491. ref1->data[0], ref1->linesize[0], tref1,
  2492. (row << 3) + 4, col << 3, &b->mv[2][0], 4, 4, w1, h1);
  2493. mc_luma_dir(s, s->dsp.mc[4][b->filter][0],
  2494. s->dst[0] + 4 * ls_y + 4, ls_y,
  2495. ref1->data[0], ref1->linesize[0], tref1,
  2496. (row << 3) + 4, (col << 3) + 4, &b->mv[3][0], 4, 4, w1, h1);
  2497. if (b->comp) {
  2498. mc_luma_dir(s, s->dsp.mc[4][b->filter][1], s->dst[0], ls_y,
  2499. ref2->data[0], ref2->linesize[0], tref2,
  2500. row << 3, col << 3, &b->mv[0][1], 4, 4, w2, h2);
  2501. mc_luma_dir(s, s->dsp.mc[4][b->filter][1], s->dst[0] + 4, ls_y,
  2502. ref2->data[0], ref2->linesize[0], tref2,
  2503. row << 3, (col << 3) + 4, &b->mv[1][1], 4, 4, w2, h2);
  2504. mc_luma_dir(s, s->dsp.mc[4][b->filter][1],
  2505. s->dst[0] + 4 * ls_y, ls_y,
  2506. ref2->data[0], ref2->linesize[0], tref2,
  2507. (row << 3) + 4, col << 3, &b->mv[2][1], 4, 4, w2, h2);
  2508. mc_luma_dir(s, s->dsp.mc[4][b->filter][1],
  2509. s->dst[0] + 4 * ls_y + 4, ls_y,
  2510. ref2->data[0], ref2->linesize[0], tref2,
  2511. (row << 3) + 4, (col << 3) + 4, &b->mv[3][1], 4, 4, w2, h2);
  2512. }
  2513. }
  2514. } else {
  2515. int bwl = bwlog_tab[0][b->bs];
  2516. int bw = bwh_tab[0][b->bs][0] * 4, bh = bwh_tab[0][b->bs][1] * 4;
  2517. mc_luma_dir(s, s->dsp.mc[bwl][b->filter][0], s->dst[0], ls_y,
  2518. ref1->data[0], ref1->linesize[0], tref1,
  2519. row << 3, col << 3, &b->mv[0][0],bw, bh, w1, h1);
  2520. if (b->comp)
  2521. mc_luma_dir(s, s->dsp.mc[bwl][b->filter][1], s->dst[0], ls_y,
  2522. ref2->data[0], ref2->linesize[0], tref2,
  2523. row << 3, col << 3, &b->mv[0][1], bw, bh, w2, h2);
  2524. }
  2525. // uv inter pred
  2526. {
  2527. int bwl = bwlog_tab[1][b->bs];
  2528. int bw = bwh_tab[1][b->bs][0] * 4, bh = bwh_tab[1][b->bs][1] * 4;
  2529. VP56mv mvuv;
  2530. w1 = (w1 + 1) >> 1;
  2531. h1 = (h1 + 1) >> 1;
  2532. if (b->comp) {
  2533. w2 = (w2 + 1) >> 1;
  2534. h2 = (h2 + 1) >> 1;
  2535. }
  2536. if (b->bs > BS_8x8) {
  2537. mvuv.x = ROUNDED_DIV(b->mv[0][0].x + b->mv[1][0].x + b->mv[2][0].x + b->mv[3][0].x, 4);
  2538. mvuv.y = ROUNDED_DIV(b->mv[0][0].y + b->mv[1][0].y + b->mv[2][0].y + b->mv[3][0].y, 4);
  2539. } else {
  2540. mvuv = b->mv[0][0];
  2541. }
  2542. mc_chroma_dir(s, s->dsp.mc[bwl][b->filter][0],
  2543. s->dst[1], s->dst[2], ls_uv,
  2544. ref1->data[1], ref1->linesize[1],
  2545. ref1->data[2], ref1->linesize[2], tref1,
  2546. row << 2, col << 2, &mvuv, bw, bh, w1, h1);
  2547. if (b->comp) {
  2548. if (b->bs > BS_8x8) {
  2549. mvuv.x = ROUNDED_DIV(b->mv[0][1].x + b->mv[1][1].x + b->mv[2][1].x + b->mv[3][1].x, 4);
  2550. mvuv.y = ROUNDED_DIV(b->mv[0][1].y + b->mv[1][1].y + b->mv[2][1].y + b->mv[3][1].y, 4);
  2551. } else {
  2552. mvuv = b->mv[0][1];
  2553. }
  2554. mc_chroma_dir(s, s->dsp.mc[bwl][b->filter][1],
  2555. s->dst[1], s->dst[2], ls_uv,
  2556. ref2->data[1], ref2->linesize[1],
  2557. ref2->data[2], ref2->linesize[2], tref2,
  2558. row << 2, col << 2, &mvuv, bw, bh, w2, h2);
  2559. }
  2560. }
  2561. if (!b->skip) {
2562. /* mostly copied from intra_recon() */
  2563. int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
  2564. int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
  2565. int end_x = FFMIN(2 * (s->cols - col), w4);
  2566. int end_y = FFMIN(2 * (s->rows - row), h4);
  2567. int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless;
  2568. int uvstep1d = 1 << b->uvtx, p;
  2569. uint8_t *dst = s->dst[0];
  2570. // y itxfm add
  2571. for (n = 0, y = 0; y < end_y; y += step1d) {
  2572. uint8_t *ptr = dst;
  2573. for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d, n += step) {
  2574. int eob = b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
  2575. if (eob)
  2576. s->dsp.itxfm_add[tx][DCT_DCT](ptr, s->y_stride,
  2577. s->block + 16 * n, eob);
  2578. }
  2579. dst += 4 * s->y_stride * step1d;
  2580. }
  2581. // uv itxfm add
  2582. end_x >>= 1;
  2583. end_y >>= 1;
  2584. step = 1 << (b->uvtx * 2);
  2585. for (p = 0; p < 2; p++) {
  2586. dst = s->dst[p + 1];
  2587. for (n = 0, y = 0; y < end_y; y += uvstep1d) {
  2588. uint8_t *ptr = dst;
  2589. for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d, n += step) {
  2590. int eob = b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
  2591. if (eob)
  2592. s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
  2593. s->uvblock[p] + 16 * n, eob);
  2594. }
  2595. dst += 4 * uvstep1d * s->uv_stride;
  2596. }
  2597. }
  2598. }
  2599. }
  2600. static av_always_inline void mask_edges(struct VP9Filter *lflvl, int is_uv,
  2601. int row_and_7, int col_and_7,
  2602. int w, int h, int col_end, int row_end,
  2603. enum TxfmMode tx, int skip_inter)
  2604. {
  2605. // FIXME I'm pretty sure all loops can be replaced by a single LUT if
  2606. // we make VP9Filter.mask uint64_t (i.e. row/col all single variable)
  2607. // and make the LUT 5-indexed (bl, bp, is_uv, tx and row/col), and then
  2608. // use row_and_7/col_and_7 as shifts (1*col_and_7+8*row_and_7)
  2609. // the intended behaviour of the vp9 loopfilter is to work on 8-pixel
  2610. // edges. This means that for UV, we work on two subsampled blocks at
  2611. // a time, and we only use the topleft block's mode information to set
  2612. // things like block strength. Thus, for any block size smaller than
  2613. // 16x16, ignore the odd portion of the block.
  2614. if (tx == TX_4X4 && is_uv) {
  2615. if (h == 1) {
  2616. if (row_and_7 & 1)
  2617. return;
  2618. if (!row_end)
  2619. h += 1;
  2620. }
  2621. if (w == 1) {
  2622. if (col_and_7 & 1)
  2623. return;
  2624. if (!col_end)
  2625. w += 1;
  2626. }
  2627. }
  2628. if (tx == TX_4X4 && !skip_inter) {
  2629. int t = 1 << col_and_7, m_col = (t << w) - t, y;
  2630. int m_col_odd = (t << (w - 1)) - t;
  2631. // on 32-px edges, use the 8-px wide loopfilter; else, use 4-px wide
  2632. if (is_uv) {
  2633. int m_row_8 = m_col & 0x01, m_row_4 = m_col - m_row_8;
  2634. for (y = row_and_7; y < h + row_and_7; y++) {
  2635. int col_mask_id = 2 - !(y & 7);
  2636. lflvl->mask[is_uv][0][y][1] |= m_row_8;
  2637. lflvl->mask[is_uv][0][y][2] |= m_row_4;
  2638. // for odd lines, if the odd col is not being filtered,
  2639. // skip odd row also:
  2640. // .---. <-- a
  2641. // | |
  2642. // |___| <-- b
  2643. // ^ ^
  2644. // c d
  2645. //
  2646. // if a/c are even row/col and b/d are odd, and d is skipped,
  2647. // e.g. right edge of size-66x66.webm, then skip b also (bug)
  2648. if ((col_end & 1) && (y & 1)) {
  2649. lflvl->mask[is_uv][1][y][col_mask_id] |= m_col_odd;
  2650. } else {
  2651. lflvl->mask[is_uv][1][y][col_mask_id] |= m_col;
  2652. }
  2653. }
  2654. } else {
  2655. int m_row_8 = m_col & 0x11, m_row_4 = m_col - m_row_8;
  2656. for (y = row_and_7; y < h + row_and_7; y++) {
  2657. int col_mask_id = 2 - !(y & 3);
  2658. lflvl->mask[is_uv][0][y][1] |= m_row_8; // row edge
  2659. lflvl->mask[is_uv][0][y][2] |= m_row_4;
  2660. lflvl->mask[is_uv][1][y][col_mask_id] |= m_col; // col edge
  2661. lflvl->mask[is_uv][0][y][3] |= m_col;
  2662. lflvl->mask[is_uv][1][y][3] |= m_col;
  2663. }
  2664. }
  2665. } else {
  2666. int y, t = 1 << col_and_7, m_col = (t << w) - t;
  2667. if (!skip_inter) {
  2668. int mask_id = (tx == TX_8X8);
  2669. int l2 = tx + is_uv - 1, step1d = 1 << l2;
  2670. static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
  2671. int m_row = m_col & masks[l2];
2672. // at odd UV tx16/tx32 col/row loopfilter edges, force the 8px-wide
2673. // loopfilter to prevent filtering past the visible edge.
  2674. if (is_uv && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
  2675. int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
  2676. int m_row_8 = m_row - m_row_16;
  2677. for (y = row_and_7; y < h + row_and_7; y++) {
  2678. lflvl->mask[is_uv][0][y][0] |= m_row_16;
  2679. lflvl->mask[is_uv][0][y][1] |= m_row_8;
  2680. }
  2681. } else {
  2682. for (y = row_and_7; y < h + row_and_7; y++)
  2683. lflvl->mask[is_uv][0][y][mask_id] |= m_row;
  2684. }
  2685. if (is_uv && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
  2686. for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
  2687. lflvl->mask[is_uv][1][y][0] |= m_col;
  2688. if (y - row_and_7 == h - 1)
  2689. lflvl->mask[is_uv][1][y][1] |= m_col;
  2690. } else {
  2691. for (y = row_and_7; y < h + row_and_7; y += step1d)
  2692. lflvl->mask[is_uv][1][y][mask_id] |= m_col;
  2693. }
  2694. } else if (tx != TX_4X4) {
  2695. int mask_id;
  2696. mask_id = (tx == TX_8X8) || (is_uv && h == 1);
  2697. lflvl->mask[is_uv][1][row_and_7][mask_id] |= m_col;
  2698. mask_id = (tx == TX_8X8) || (is_uv && w == 1);
  2699. for (y = row_and_7; y < h + row_and_7; y++)
  2700. lflvl->mask[is_uv][0][y][mask_id] |= t;
  2701. } else if (is_uv) {
  2702. int t8 = t & 0x01, t4 = t - t8;
  2703. for (y = row_and_7; y < h + row_and_7; y++) {
  2704. lflvl->mask[is_uv][0][y][2] |= t4;
  2705. lflvl->mask[is_uv][0][y][1] |= t8;
  2706. }
  2707. lflvl->mask[is_uv][1][row_and_7][2 - !(row_and_7 & 7)] |= m_col;
  2708. } else {
  2709. int t8 = t & 0x11, t4 = t - t8;
  2710. for (y = row_and_7; y < h + row_and_7; y++) {
  2711. lflvl->mask[is_uv][0][y][2] |= t4;
  2712. lflvl->mask[is_uv][0][y][1] |= t8;
  2713. }
  2714. lflvl->mask[is_uv][1][row_and_7][2 - !(row_and_7 & 3)] |= m_col;
  2715. }
  2716. }
  2717. }
  2718. static void decode_b(AVCodecContext *ctx, int row, int col,
  2719. struct VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
  2720. enum BlockLevel bl, enum BlockPartition bp)
  2721. {
  2722. VP9Context *s = ctx->priv_data;
  2723. VP9Block *b = s->b;
  2724. enum BlockSize bs = bl * 3 + bp;
  2725. int w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl;
  2726. int emu[2];
  2727. AVFrame *f = s->frames[CUR_FRAME].tf.f;
  2728. s->row = row;
  2729. s->row7 = row & 7;
  2730. s->col = col;
  2731. s->col7 = col & 7;
  2732. s->min_mv.x = -(128 + col * 64);
  2733. s->min_mv.y = -(128 + row * 64);
  2734. s->max_mv.x = 128 + (s->cols - col - w4) * 64;
  2735. s->max_mv.y = 128 + (s->rows - row - h4) * 64;
  2736. if (s->pass < 2) {
  2737. b->bs = bs;
  2738. b->bl = bl;
  2739. b->bp = bp;
  2740. decode_mode(ctx);
  2741. b->uvtx = b->tx - (w4 * 2 == (1 << b->tx) || h4 * 2 == (1 << b->tx));
  2742. if (!b->skip) {
  2743. decode_coeffs(ctx);
  2744. } else {
  2745. int row7 = s->row7;
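// the block is skipped, i.e. carries no coefficients, so zero the
// above/left nnz contexts (2 y entries per 8x8 unit, 1 per uv plane)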
  2746. #define SPLAT_ZERO_CTX(v, n) \
  2747. switch (n) { \
  2748. case 1: v = 0; break; \
  2749. case 2: AV_ZERO16(&v); break; \
  2750. case 4: AV_ZERO32(&v); break; \
  2751. case 8: AV_ZERO64(&v); break; \
  2752. case 16: AV_ZERO128(&v); break; \
  2753. }
  2754. #define SPLAT_ZERO_YUV(dir, var, off, n) \
  2755. do { \
  2756. SPLAT_ZERO_CTX(s->dir##_y_##var[off * 2], n * 2); \
  2757. SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off], n); \
  2758. SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off], n); \
  2759. } while (0)
  2760. switch (w4) {
  2761. case 1: SPLAT_ZERO_YUV(above, nnz_ctx, col, 1); break;
  2762. case 2: SPLAT_ZERO_YUV(above, nnz_ctx, col, 2); break;
  2763. case 4: SPLAT_ZERO_YUV(above, nnz_ctx, col, 4); break;
  2764. case 8: SPLAT_ZERO_YUV(above, nnz_ctx, col, 8); break;
  2765. }
  2766. switch (h4) {
  2767. case 1: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 1); break;
  2768. case 2: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 2); break;
  2769. case 4: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 4); break;
  2770. case 8: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 8); break;
  2771. }
  2772. }
  2773. if (s->pass == 1) {
  2774. s->b++;
  2775. s->block += w4 * h4 * 64;
  2776. s->uvblock[0] += w4 * h4 * 16;
  2777. s->uvblock[1] += w4 * h4 * 16;
  2778. s->eob += 4 * w4 * h4;
  2779. s->uveob[0] += w4 * h4;
  2780. s->uveob[1] += w4 * h4;
  2781. return;
  2782. }
  2783. }
2784. // use emulated-edge buffers if the stride of the target buffer can't hold
2785. // the block; this makes it possible to support emu-edge and the like even
2786. // when we have large block overhangs
  2787. emu[0] = (col + w4) * 8 > f->linesize[0] ||
  2788. (row + h4) > s->rows;
  2789. emu[1] = (col + w4) * 4 > f->linesize[1] ||
  2790. (row + h4) > s->rows;
  2791. if (emu[0]) {
  2792. s->dst[0] = s->tmp_y;
  2793. s->y_stride = 64;
  2794. } else {
  2795. s->dst[0] = f->data[0] + yoff;
  2796. s->y_stride = f->linesize[0];
  2797. }
  2798. if (emu[1]) {
  2799. s->dst[1] = s->tmp_uv[0];
  2800. s->dst[2] = s->tmp_uv[1];
  2801. s->uv_stride = 32;
  2802. } else {
  2803. s->dst[1] = f->data[1] + uvoff;
  2804. s->dst[2] = f->data[2] + uvoff;
  2805. s->uv_stride = f->linesize[1];
  2806. }
  2807. if (b->intra) {
  2808. intra_recon(ctx, yoff, uvoff);
  2809. } else {
  2810. inter_recon(ctx);
  2811. }
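// copy the reconstructed block back from the temporary buffers into the
// frame, in power-of-two-wide chunks, using the unfiltered put functions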
  2812. if (emu[0]) {
  2813. int w = FFMIN(s->cols - col, w4) * 8, h = FFMIN(s->rows - row, h4) * 8, n, o = 0;
  2814. for (n = 0; o < w; n++) {
  2815. int bw = 64 >> n;
  2816. av_assert2(n <= 4);
  2817. if (w & bw) {
  2818. s->dsp.mc[n][0][0][0][0](f->data[0] + yoff + o, f->linesize[0],
  2819. s->tmp_y + o, 64, h, 0, 0);
  2820. o += bw;
  2821. }
  2822. }
  2823. }
  2824. if (emu[1]) {
  2825. int w = FFMIN(s->cols - col, w4) * 4, h = FFMIN(s->rows - row, h4) * 4, n, o = 0;
  2826. for (n = 1; o < w; n++) {
  2827. int bw = 64 >> n;
  2828. av_assert2(n <= 4);
  2829. if (w & bw) {
  2830. s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o, f->linesize[1],
  2831. s->tmp_uv[0] + o, 32, h, 0, 0);
  2832. s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o, f->linesize[2],
  2833. s->tmp_uv[1] + o, 32, h, 0, 0);
  2834. o += bw;
  2835. }
  2836. }
  2837. }
  2838. // pick filter level and find edges to apply filter to
  2839. if (s->filter.level &&
  2840. (lvl = s->segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1]
  2841. [b->mode[3] != ZEROMV]) > 0) {
  2842. int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4);
  2843. int skip_inter = !b->intra && b->skip, col7 = s->col7, row7 = s->row7;
  2844. setctx_2d(&lflvl->level[row7 * 8 + col7], w4, h4, 8, lvl);
  2845. mask_edges(lflvl, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
  2846. mask_edges(lflvl, 1, row7, col7, x_end, y_end,
  2847. s->cols & 1 && col + w4 >= s->cols ? s->cols & 7 : 0,
  2848. s->rows & 1 && row + h4 >= s->rows ? s->rows & 7 : 0,
  2849. b->uvtx, skip_inter);
  2850. if (!s->filter.lim_lut[lvl]) {
  2851. int sharp = s->filter.sharpness;
  2852. int limit = lvl;
  2853. if (sharp > 0) {
  2854. limit >>= (sharp + 3) >> 2;
  2855. limit = FFMIN(limit, 9 - sharp);
  2856. }
  2857. limit = FFMAX(limit, 1);
  2858. s->filter.lim_lut[lvl] = limit;
  2859. s->filter.mblim_lut[lvl] = 2 * (lvl + 2) + limit;
  2860. }
  2861. }
  2862. if (s->pass == 2) {
  2863. s->b++;
  2864. s->block += w4 * h4 * 64;
  2865. s->uvblock[0] += w4 * h4 * 16;
  2866. s->uvblock[1] += w4 * h4 * 16;
  2867. s->eob += 4 * w4 * h4;
  2868. s->uveob[0] += w4 * h4;
  2869. s->uveob[1] += w4 * h4;
  2870. }
  2871. }
  2872. static void decode_sb(AVCodecContext *ctx, int row, int col, struct VP9Filter *lflvl,
  2873. ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
  2874. {
  2875. VP9Context *s = ctx->priv_data;
  2876. int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
  2877. (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
  2878. const uint8_t *p = s->keyframe ? vp9_default_kf_partition_probs[bl][c] :
  2879. s->prob.p.partition[bl][c];
  2880. enum BlockPartition bp;
  2881. ptrdiff_t hbs = 4 >> bl;
  2882. AVFrame *f = s->frames[CUR_FRAME].tf.f;
  2883. ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
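// read the partition symbol, conditioned on the above/left partition
// contexts, and recurse; when the right and/or bottom half lies outside
// the visible frame, only a subset of partitions is possible, so a single
// branch chooses between SPLIT and H or V, or SPLIT is implied outright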
  2884. if (bl == BL_8X8) {
  2885. bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
  2886. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2887. } else if (col + hbs < s->cols) { // FIXME why not <=?
  2888. if (row + hbs < s->rows) { // FIXME why not <=?
  2889. bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
  2890. switch (bp) {
  2891. case PARTITION_NONE:
  2892. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2893. break;
  2894. case PARTITION_H:
  2895. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2896. yoff += hbs * 8 * y_stride;
  2897. uvoff += hbs * 4 * uv_stride;
  2898. decode_b(ctx, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
  2899. break;
  2900. case PARTITION_V:
  2901. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2902. yoff += hbs * 8;
  2903. uvoff += hbs * 4;
  2904. decode_b(ctx, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
  2905. break;
  2906. case PARTITION_SPLIT:
  2907. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2908. decode_sb(ctx, row, col + hbs, lflvl,
  2909. yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
  2910. yoff += hbs * 8 * y_stride;
  2911. uvoff += hbs * 4 * uv_stride;
  2912. decode_sb(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
  2913. decode_sb(ctx, row + hbs, col + hbs, lflvl,
  2914. yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
  2915. break;
  2916. default:
  2917. av_assert0(0);
  2918. }
  2919. } else if (vp56_rac_get_prob_branchy(&s->c, p[1])) {
  2920. bp = PARTITION_SPLIT;
  2921. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2922. decode_sb(ctx, row, col + hbs, lflvl,
  2923. yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
  2924. } else {
  2925. bp = PARTITION_H;
  2926. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2927. }
  2928. } else if (row + hbs < s->rows) { // FIXME why not <=?
  2929. if (vp56_rac_get_prob_branchy(&s->c, p[2])) {
  2930. bp = PARTITION_SPLIT;
  2931. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2932. yoff += hbs * 8 * y_stride;
  2933. uvoff += hbs * 4 * uv_stride;
  2934. decode_sb(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
  2935. } else {
  2936. bp = PARTITION_V;
  2937. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2938. }
  2939. } else {
  2940. bp = PARTITION_SPLIT;
  2941. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2942. }
  2943. s->counts.partition[bl][c][bp]++;
  2944. }
static void decode_sb_mem(AVCodecContext *ctx, int row, int col, struct VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    VP9Context *s = ctx->priv_data;
    VP9Block *b = s->b;
    ptrdiff_t hbs = 4 >> bl;
    AVFrame *f = s->frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        decode_b(ctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (s->b->bl == bl) {
        decode_b(ctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 4 * uv_stride;
            decode_b(ctx, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff += hbs * 8;
            uvoff += hbs * 4;
            decode_b(ctx, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        decode_sb_mem(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(ctx, row, col + hbs, lflvl, yoff + 8 * hbs,
                              uvoff + 4 * hbs, bl + 1);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 4 * uv_stride;
                decode_sb_mem(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(ctx, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
            } else {
                yoff += hbs * 8;
                uvoff += hbs * 4;
                decode_sb_mem(ctx, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 4 * uv_stride;
            decode_sb_mem(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}

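/*
 * Apply the in-loop deblocking filter to one 64x64 superblock. The bitmasks
 * in lflvl describe, per 8x8 (and inner 4x4) edge and per plane, which
 * column/row edges need filtering; L is the filter level for an edge, from
 * which the strength parameters E (mblim) and I (lim) are looked up. Where
 * two adjacent edges use the same filter width, the second edge's level can
 * be packed into the upper 8 bits of E/I/H so that both are handled by a
 * single (mix2) DSP call.
 */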
static void loopfilter_sb(AVCodecContext *ctx, struct VP9Filter *lflvl,
                          int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
{
    VP9Context *s = ctx->priv_data;
    AVFrame *f = s->frames[CUR_FRAME].tf.f;
    uint8_t *dst = f->data[0] + yoff, *lvl = lflvl->level;
    ptrdiff_t ls_y = f->linesize[0], ls_uv = f->linesize[1];
    int y, x, p;

    // FIXME to what extent can we interleave the v/h loopfilter calls? E.g.
    // if you think of them as acting on a 8x8 block max, we can interleave
    // each v/h within the single x loop, but that only works if we work on
    // 8 pixel blocks, and we won't always do that (we want at least 16px
    // to use SSE2 optimizations, perhaps 32 for AVX2)

    // filter edges between columns, Y plane (e.g. block1 | block2)
    for (y = 0; y < 8; y += 2, dst += 16 * ls_y, lvl += 16) {
        uint8_t *ptr = dst, *l = lvl, *hmask1 = lflvl->mask[0][0][y];
        uint8_t *hmask2 = lflvl->mask[0][0][y + 1];
        unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2], hm13 = hmask1[3];
        unsigned hm2 = hmask2[1] | hmask2[2], hm23 = hmask2[3];
        unsigned hm = hm1 | hm2 | hm13 | hm23;

        for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 8, l++) {
            if (hm1 & x) {
                int L = *l, H = L >> 4;
                int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                if (col || x > 1) {
                    if (hmask1[0] & x) {
                        if (hmask2[0] & x) {
                            av_assert2(l[8] == L);
                            s->dsp.loop_filter_16[0](ptr, ls_y, E, I, H);
                        } else {
                            s->dsp.loop_filter_8[2][0](ptr, ls_y, E, I, H);
                        }
                    } else if (hm2 & x) {
                        L = l[8];
                        H |= (L >> 4) << 8;
                        E |= s->filter.mblim_lut[L] << 8;
                        I |= s->filter.lim_lut[L] << 8;
                        s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
                                               [!!(hmask2[1] & x)]
                                               [0](ptr, ls_y, E, I, H);
                    } else {
                        s->dsp.loop_filter_8[!!(hmask1[1] & x)]
                                            [0](ptr, ls_y, E, I, H);
                    }
                }
            } else if (hm2 & x) {
                int L = l[8], H = L >> 4;
                int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                if (col || x > 1) {
                    s->dsp.loop_filter_8[!!(hmask2[1] & x)]
                                        [0](ptr + 8 * ls_y, ls_y, E, I, H);
                }
            }
            if (hm13 & x) {
                int L = *l, H = L >> 4;
                int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                if (hm23 & x) {
                    L = l[8];
                    H |= (L >> 4) << 8;
                    E |= s->filter.mblim_lut[L] << 8;
                    I |= s->filter.lim_lut[L] << 8;
                    s->dsp.loop_filter_mix2[0][0][0](ptr + 4, ls_y, E, I, H);
                } else {
                    s->dsp.loop_filter_8[0][0](ptr + 4, ls_y, E, I, H);
                }
            } else if (hm23 & x) {
                int L = l[8], H = L >> 4;
                int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                s->dsp.loop_filter_8[0][0](ptr + 8 * ls_y + 4, ls_y, E, I, H);
            }
        }
    }

    //                                          block1
    // filter edges between rows, Y plane (e.g. ------)
    //                                          block2
    dst = f->data[0] + yoff;
    lvl = lflvl->level;
    for (y = 0; y < 8; y++, dst += 8 * ls_y, lvl += 8) {
        uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[0][1][y];
        unsigned vm = vmask[0] | vmask[1] | vmask[2], vm3 = vmask[3];

        for (x = 1; vm & ~(x - 1); x <<= 2, ptr += 16, l += 2) {
            if (row || y) {
                if (vm & x) {
                    int L = *l, H = L >> 4;
                    int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                    if (vmask[0] & x) {
                        if (vmask[0] & (x << 1)) {
                            av_assert2(l[1] == L);
                            s->dsp.loop_filter_16[1](ptr, ls_y, E, I, H);
                        } else {
                            s->dsp.loop_filter_8[2][1](ptr, ls_y, E, I, H);
                        }
                    } else if (vm & (x << 1)) {
                        L = l[1];
                        H |= (L >> 4) << 8;
                        E |= s->filter.mblim_lut[L] << 8;
                        I |= s->filter.lim_lut[L] << 8;
                        s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
                                               [!!(vmask[1] & (x << 1))]
                                               [1](ptr, ls_y, E, I, H);
                    } else {
                        s->dsp.loop_filter_8[!!(vmask[1] & x)]
                                            [1](ptr, ls_y, E, I, H);
                    }
                } else if (vm & (x << 1)) {
                    int L = l[1], H = L >> 4;
                    int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                    s->dsp.loop_filter_8[!!(vmask[1] & (x << 1))]
                                        [1](ptr + 8, ls_y, E, I, H);
                }
            }
            if (vm3 & x) {
                int L = *l, H = L >> 4;
                int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                if (vm3 & (x << 1)) {
                    L = l[1];
                    H |= (L >> 4) << 8;
                    E |= s->filter.mblim_lut[L] << 8;
                    I |= s->filter.lim_lut[L] << 8;
                    s->dsp.loop_filter_mix2[0][0][1](ptr + ls_y * 4, ls_y, E, I, H);
                } else {
                    s->dsp.loop_filter_8[0][1](ptr + ls_y * 4, ls_y, E, I, H);
                }
            } else if (vm3 & (x << 1)) {
                int L = l[1], H = L >> 4;
                int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                s->dsp.loop_filter_8[0][1](ptr + ls_y * 4 + 8, ls_y, E, I, H);
            }
        }
    }

    // same principle but for U/V planes
    for (p = 0; p < 2; p++) {
        lvl = lflvl->level;
        dst = f->data[1 + p] + uvoff;
        for (y = 0; y < 8; y += 4, dst += 16 * ls_uv, lvl += 32) {
            uint8_t *ptr = dst, *l = lvl, *hmask1 = lflvl->mask[1][0][y];
            uint8_t *hmask2 = lflvl->mask[1][0][y + 2];
            unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2];
            unsigned hm2 = hmask2[1] | hmask2[2], hm = hm1 | hm2;

            for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 4) {
                if (col || x > 1) {
                    if (hm1 & x) {
                        int L = *l, H = L >> 4;
                        int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                        if (hmask1[0] & x) {
                            if (hmask2[0] & x) {
                                av_assert2(l[16] == L);
                                s->dsp.loop_filter_16[0](ptr, ls_uv, E, I, H);
                            } else {
                                s->dsp.loop_filter_8[2][0](ptr, ls_uv, E, I, H);
                            }
                        } else if (hm2 & x) {
                            L = l[16];
                            H |= (L >> 4) << 8;
                            E |= s->filter.mblim_lut[L] << 8;
                            I |= s->filter.lim_lut[L] << 8;
                            s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
                                                   [!!(hmask2[1] & x)]
                                                   [0](ptr, ls_uv, E, I, H);
                        } else {
                            s->dsp.loop_filter_8[!!(hmask1[1] & x)]
                                                [0](ptr, ls_uv, E, I, H);
                        }
                    } else if (hm2 & x) {
                        int L = l[16], H = L >> 4;
                        int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                        s->dsp.loop_filter_8[!!(hmask2[1] & x)]
                                            [0](ptr + 8 * ls_uv, ls_uv, E, I, H);
                    }
                }
                if (x & 0xAA)
                    l += 2;
            }
        }
        lvl = lflvl->level;
        dst = f->data[1 + p] + uvoff;
        for (y = 0; y < 8; y++, dst += 4 * ls_uv) {
            uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[1][1][y];
            unsigned vm = vmask[0] | vmask[1] | vmask[2];

            for (x = 1; vm & ~(x - 1); x <<= 4, ptr += 16, l += 4) {
                if (row || y) {
                    if (vm & x) {
                        int L = *l, H = L >> 4;
                        int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                        if (vmask[0] & x) {
                            if (vmask[0] & (x << 2)) {
                                av_assert2(l[2] == L);
                                s->dsp.loop_filter_16[1](ptr, ls_uv, E, I, H);
                            } else {
                                s->dsp.loop_filter_8[2][1](ptr, ls_uv, E, I, H);
                            }
                        } else if (vm & (x << 2)) {
                            L = l[2];
                            H |= (L >> 4) << 8;
                            E |= s->filter.mblim_lut[L] << 8;
                            I |= s->filter.lim_lut[L] << 8;
                            s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
                                                   [!!(vmask[1] & (x << 2))]
                                                   [1](ptr, ls_uv, E, I, H);
                        } else {
                            s->dsp.loop_filter_8[!!(vmask[1] & x)]
                                                [1](ptr, ls_uv, E, I, H);
                        }
                    } else if (vm & (x << 2)) {
                        int L = l[2], H = L >> 4;
                        int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];

                        s->dsp.loop_filter_8[!!(vmask[1] & (x << 2))]
                                            [1](ptr + 8, ls_uv, E, I, H);
                    }
                }
            }
            if (y & 1)
                lvl += 16;
        }
    }
}

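/*
 * Compute the start/end of one tile in units of 8x8 blocks. idx is the tile
 * index, n the frame size in 64x64 superblocks, log2_n the log2 of the tile
 * count. Rough worked example (illustrative numbers, not from any particular
 * stream): with 2 tile columns (log2_n = 1) and n = 9 sb64 columns, tile 0
 * covers sb64s 0..3 (blocks 0..31) and tile 1 covers sb64s 4..8 (blocks
 * 32..71).
 */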
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_start = ( idx      * n) >> log2_n;
    int sb_end   = ((idx + 1) * n) >> log2_n;
    *start = FFMIN(sb_start, n) << 3;
    *end   = FFMIN(sb_end,   n) << 3;
}

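/*
 * Merge one binary probability with the counts observed this frame: p2 is
 * the probability implied by the counts (with rounding), and the old value
 * p1 is blended towards p2 by update_factor/256, scaled down when fewer
 * than max_count events were seen. Rough worked example (illustrative
 * numbers): ct0 = 6, ct1 = 2, max_count = 20, update_factor = 128 gives
 * p2 = (6*256 + 4) / 8 = 192 and an effective factor of 1024/20 = 51, so
 * *p moves from e.g. 128 to 128 + (((192 - 128) * 51 + 128) >> 8) = 141.
 */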
static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
                                        int max_count, int update_factor)
{
    unsigned ct = ct0 + ct1, p2, p1;

    if (!ct)
        return;

    p1 = *p;
    p2 = ((ct0 << 8) + (ct >> 1)) / ct;
    p2 = av_clip(p2, 1, 255);
    ct = FFMIN(ct, max_count);
    update_factor = FASTDIV(update_factor * ct, max_count);

    // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
    *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
}

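/*
 * Backward adaptation: after a frame has been decoded, fold the symbol
 * counts gathered during decoding back into the frame context. Coefficient
 * probabilities appear to adapt with factor 112 except on the first inter
 * frame after a keyframe, where 128 is used; all other probabilities use
 * 128. For each probability tree, counts are combined following the tree
 * structure (node probability = left branch count vs. sum of the rest).
 */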
static void adapt_probs(VP9Context *s)
{
    int i, j, k, l, m;
    prob_context *p = &s->prob_ctx[s->framectxid].p;
    int uf = (s->keyframe || s->intraonly || !s->last_keyframe) ? 112 : 128;

    // coefficients
    for (i = 0; i < 4; i++)
        for (j = 0; j < 2; j++)
            for (k = 0; k < 2; k++)
                for (l = 0; l < 6; l++)
                    for (m = 0; m < 6; m++) {
                        uint8_t *pp = s->prob_ctx[s->framectxid].coef[i][j][k][l][m];
                        unsigned *e = s->counts.eob[i][j][k][l][m];
                        unsigned *c = s->counts.coef[i][j][k][l][m];

                        if (l == 0 && m >= 3) // dc only has 3 pt
                            break;

                        adapt_prob(&pp[0], e[0], e[1], 24, uf);
                        adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
                        adapt_prob(&pp[2], c[1], c[2], 24, uf);
                    }

    if (s->keyframe || s->intraonly) {
        memcpy(p->skip, s->prob.p.skip, sizeof(p->skip));
        memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
        memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
        memcpy(p->tx8p, s->prob.p.tx8p, sizeof(p->tx8p));
        return;
    }

    // skip flag
    for (i = 0; i < 3; i++)
        adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);

    // intra/inter flag
    for (i = 0; i < 4; i++)
        adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);

    // comppred flag
    if (s->comppredmode == PRED_SWITCHABLE) {
        for (i = 0; i < 5; i++)
            adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
    }

    // reference frames
    if (s->comppredmode != PRED_SINGLEREF) {
        for (i = 0; i < 5; i++)
            adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
                       s->counts.comp_ref[i][1], 20, 128);
    }
    if (s->comppredmode != PRED_COMPREF) {
        for (i = 0; i < 5; i++) {
            uint8_t *pp = p->single_ref[i];
            unsigned (*c)[2] = s->counts.single_ref[i];

            adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
            adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
        }
    }

    // block partitioning
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++) {
            uint8_t *pp = p->partition[i][j];
            unsigned *c = s->counts.partition[i][j];

            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }

    // tx size
    if (s->txfmmode == TX_SWITCHABLE) {
        for (i = 0; i < 2; i++) {
            unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];

            adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
            adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
            adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
            adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
        }
    }

    // interpolation filter
    if (s->filtermode == FILTER_SWITCHABLE) {
        for (i = 0; i < 4; i++) {
            uint8_t *pp = p->filter[i];
            unsigned *c = s->counts.filter[i];

            adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
            adapt_prob(&pp[1], c[1], c[2], 20, 128);
        }
    }

    // inter modes
    for (i = 0; i < 7; i++) {
        uint8_t *pp = p->mv_mode[i];
        unsigned *c = s->counts.mv_mode[i];

        adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
        adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
        adapt_prob(&pp[2], c[1], c[3], 20, 128);
    }

    // mv joints
    {
        uint8_t *pp = p->mv_joint;
        unsigned *c = s->counts.mv_joint;

        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);
    }

    // mv components
    for (i = 0; i < 2; i++) {
        uint8_t *pp;
        unsigned *c, (*c2)[2], sum;

        adapt_prob(&p->mv_comp[i].sign, s->counts.mv_comp[i].sign[0],
                   s->counts.mv_comp[i].sign[1], 20, 128);

        pp = p->mv_comp[i].classes;
        c = s->counts.mv_comp[i].classes;
        sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
        adapt_prob(&pp[0], c[0], sum, 20, 128);
        sum -= c[1];
        adapt_prob(&pp[1], c[1], sum, 20, 128);
        sum -= c[2] + c[3];
        adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
        adapt_prob(&pp[3], c[2], c[3], 20, 128);
        sum -= c[4] + c[5];
        adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
        adapt_prob(&pp[5], c[4], c[5], 20, 128);
        sum -= c[6];
        adapt_prob(&pp[6], c[6], sum, 20, 128);
        adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
        adapt_prob(&pp[8], c[7], c[8], 20, 128);
        adapt_prob(&pp[9], c[9], c[10], 20, 128);

        adapt_prob(&p->mv_comp[i].class0, s->counts.mv_comp[i].class0[0],
                   s->counts.mv_comp[i].class0[1], 20, 128);
        pp = p->mv_comp[i].bits;
        c2 = s->counts.mv_comp[i].bits;
        for (j = 0; j < 10; j++)
            adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);

        for (j = 0; j < 2; j++) {
            pp = p->mv_comp[i].class0_fp[j];
            c = s->counts.mv_comp[i].class0_fp[j];
            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }
        pp = p->mv_comp[i].fp;
        c = s->counts.mv_comp[i].fp;
        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);

        if (s->highprecisionmvs) {
            adapt_prob(&p->mv_comp[i].class0_hp, s->counts.mv_comp[i].class0_hp[0],
                       s->counts.mv_comp[i].class0_hp[1], 20, 128);
            adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0],
                       s->counts.mv_comp[i].hp[1], 20, 128);
        }
    }

    // y intra modes
    for (i = 0; i < 4; i++) {
        uint8_t *pp = p->y_mode[i];
        unsigned *c = s->counts.y_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }

    // uv intra modes
    for (i = 0; i < 10; i++) {
        uint8_t *pp = p->uv_mode[i];
        unsigned *c = s->counts.uv_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }
}

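/* Free the per-frame scratch buffers: block/coefficient storage and the
 * intra prediction edge buffer. */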
static void free_buffers(VP9Context *s)
{
    av_freep(&s->intra_pred_data[0]);
    av_freep(&s->b_base);
    av_freep(&s->block_base);
}

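/* Codec close: drop the internal frames, both reference slot arrays, the
 * scratch buffers and the per-tile range coder array. */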
static av_cold int vp9_decode_free(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        if (s->frames[i].tf.f->data[0])
            vp9_unref_frame(ctx, &s->frames[i]);
        av_frame_free(&s->frames[i].tf.f);
    }
    for (i = 0; i < 8; i++) {
        if (s->refs[i].f->data[0])
            ff_thread_release_buffer(ctx, &s->refs[i]);
        av_frame_free(&s->refs[i].f);
        if (s->next_refs[i].f->data[0])
            ff_thread_release_buffer(ctx, &s->next_refs[i]);
        av_frame_free(&s->next_refs[i].f);
    }
    free_buffers(s);
    av_freep(&s->c_b);
    s->c_b_size = 0;

    return 0;
}

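/*
 * Top-level per-packet entry point: parse the frame header, set up the
 * current frame and the 8 reference slots, then decode all tiles one
 * superblock row at a time, loop-filtering each row once it has been
 * reconstructed. A header-only packet (res == 0 from decode_frame_header())
 * re-outputs an existing reference frame ("show existing frame").
 */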
static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
                            int *got_frame, AVPacket *pkt)
{
    const uint8_t *data = pkt->data;
    int size = pkt->size;
    VP9Context *s = ctx->priv_data;
    int res, tile_row, tile_col, i, ref, row, col;
    int retain_segmap_ref = s->segmentation.enabled && !s->segmentation.update_map;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;
    AVFrame *f;

    if ((res = decode_frame_header(ctx, data, size, &ref)) < 0) {
        return res;
    } else if (res == 0) {
        if (!s->refs[ref].f->data[0]) {
            av_log(ctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
            return AVERROR_INVALIDDATA;
        }
        if ((res = av_frame_ref(frame, s->refs[ref].f)) < 0)
            return res;
        *got_frame = 1;
        return pkt->size;
    }
    data += res;
    size -= res;

    if (!retain_segmap_ref) {
        if (s->frames[REF_FRAME_SEGMAP].tf.f->data[0])
            vp9_unref_frame(ctx, &s->frames[REF_FRAME_SEGMAP]);
        if (!s->keyframe && !s->intraonly && !s->errorres && s->frames[CUR_FRAME].tf.f->data[0] &&
            (res = vp9_ref_frame(ctx, &s->frames[REF_FRAME_SEGMAP], &s->frames[CUR_FRAME])) < 0)
            return res;
    }
    if (s->frames[REF_FRAME_MVPAIR].tf.f->data[0])
        vp9_unref_frame(ctx, &s->frames[REF_FRAME_MVPAIR]);
    if (!s->intraonly && !s->keyframe && !s->errorres && s->frames[CUR_FRAME].tf.f->data[0] &&
        (res = vp9_ref_frame(ctx, &s->frames[REF_FRAME_MVPAIR], &s->frames[CUR_FRAME])) < 0)
        return res;
    if (s->frames[CUR_FRAME].tf.f->data[0])
        vp9_unref_frame(ctx, &s->frames[CUR_FRAME]);
    if ((res = vp9_alloc_frame(ctx, &s->frames[CUR_FRAME])) < 0)
        return res;
    f = s->frames[CUR_FRAME].tf.f;
    f->key_frame = s->keyframe;
    f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];

    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->next_refs[i].f->data[0])
            ff_thread_release_buffer(ctx, &s->next_refs[i]);
        if (s->refreshrefmask & (1 << i)) {
            res = ff_thread_ref_frame(&s->next_refs[i], &s->frames[CUR_FRAME].tf);
        } else {
            res = ff_thread_ref_frame(&s->next_refs[i], &s->refs[i]);
        }
        if (res < 0)
            return res;
    }

    if (s->fullrange)
        ctx->color_range = AVCOL_RANGE_JPEG;
    else
        ctx->color_range = AVCOL_RANGE_MPEG;

    switch (s->colorspace) {
    case 1: ctx->colorspace = AVCOL_SPC_BT470BG; break;
    case 2: ctx->colorspace = AVCOL_SPC_BT709; break;
    case 3: ctx->colorspace = AVCOL_SPC_SMPTE170M; break;
    case 4: ctx->colorspace = AVCOL_SPC_SMPTE240M; break;
    }

    // main tile decode loop
    memset(s->above_partition_ctx, 0, s->cols);
    memset(s->above_skip_ctx, 0, s->cols);
    if (s->keyframe || s->intraonly) {
        memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
    } else {
        memset(s->above_mode_ctx, NEARESTMV, s->cols);
    }
    memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
    memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 8);
    memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 8);
    memset(s->above_segpred_ctx, 0, s->cols);
    s->pass = s->frames[CUR_FRAME].uses_2pass =
        ctx->active_thread_type == FF_THREAD_FRAME && s->refreshctx && !s->parallelmode;
    if ((res = update_block_buffers(ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Failed to allocate block buffers\n");
        return res;
    }
    if (s->refreshctx && s->parallelmode) {
        int j, k, l, m;

        for (i = 0; i < 4; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++)
                            memcpy(s->prob_ctx[s->framectxid].coef[i][j][k][l][m],
                                   s->prob.coef[i][j][k][l][m], 3);
            if (s->txfmmode == i)
                break;
        }
        s->prob_ctx[s->framectxid].p = s->prob.p;
        ff_thread_finish_setup(ctx);
    } else if (!s->refreshctx) {
        ff_thread_finish_setup(ctx);
    }

    do {
        yoff = uvoff = 0;
        s->b = s->b_base;
        s->block = s->block_base;
        s->uvblock[0] = s->uvblock_base[0];
        s->uvblock[1] = s->uvblock_base[1];
        s->eob = s->eob_base;
        s->uveob[0] = s->uveob_base[0];
        s->uveob[1] = s->uveob_base[1];

        for (tile_row = 0; tile_row < s->tiling.tile_rows; tile_row++) {
            set_tile_offset(&s->tiling.tile_row_start, &s->tiling.tile_row_end,
                            tile_row, s->tiling.log2_tile_rows, s->sb_rows);
            if (s->pass != 2) {
                for (tile_col = 0; tile_col < s->tiling.tile_cols; tile_col++) {
                    unsigned tile_size;

                    if (tile_col == s->tiling.tile_cols - 1 &&
                        tile_row == s->tiling.tile_rows - 1) {
                        tile_size = size;
                    } else {
                        tile_size = AV_RB32(data);
                        data += 4;
                        size -= 4;
                    }
                    if (tile_size > size) {
                        ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
                        return AVERROR_INVALIDDATA;
                    }
                    ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size);
                    if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) { // marker bit
                        ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
                        return AVERROR_INVALIDDATA;
                    }
                    data += tile_size;
                    size -= tile_size;
                }
            }

            for (row = s->tiling.tile_row_start; row < s->tiling.tile_row_end;
                 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 32) {
                struct VP9Filter *lflvl_ptr = s->lflvl;
                ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

                for (tile_col = 0; tile_col < s->tiling.tile_cols; tile_col++) {
                    set_tile_offset(&s->tiling.tile_col_start, &s->tiling.tile_col_end,
                                    tile_col, s->tiling.log2_tile_cols, s->sb_cols);

                    if (s->pass != 2) {
                        memset(s->left_partition_ctx, 0, 8);
                        memset(s->left_skip_ctx, 0, 8);
                        if (s->keyframe || s->intraonly) {
                            memset(s->left_mode_ctx, DC_PRED, 16);
                        } else {
                            memset(s->left_mode_ctx, NEARESTMV, 8);
                        }
                        memset(s->left_y_nnz_ctx, 0, 16);
                        memset(s->left_uv_nnz_ctx, 0, 16);
                        memset(s->left_segpred_ctx, 0, 8);

                        memcpy(&s->c, &s->c_b[tile_col], sizeof(s->c));
                    }

                    for (col = s->tiling.tile_col_start;
                         col < s->tiling.tile_col_end;
                         col += 8, yoff2 += 64, uvoff2 += 32, lflvl_ptr++) {
                        // FIXME integrate with lf code (i.e. zero after each
                        // use, similar to invtxfm coefficients, or similar)
                        if (s->pass != 1) {
                            memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                        }

                        if (s->pass == 2) {
                            decode_sb_mem(ctx, row, col, lflvl_ptr,
                                          yoff2, uvoff2, BL_64X64);
                        } else {
                            decode_sb(ctx, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                        }
                    }
                    if (s->pass != 2) {
                        memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
                    }
                }

                if (s->pass == 1) {
                    continue;
                }

                // backup pre-loopfilter reconstruction data for intra
                // prediction of next row of sb64s
                if (row + 8 < s->rows) {
                    memcpy(s->intra_pred_data[0],
                           f->data[0] + yoff + 63 * ls_y,
                           8 * s->cols);
                    memcpy(s->intra_pred_data[1],
                           f->data[1] + uvoff + 31 * ls_uv,
                           4 * s->cols);
                    memcpy(s->intra_pred_data[2],
                           f->data[2] + uvoff + 31 * ls_uv,
                           4 * s->cols);
                }

                // loopfilter one row
                if (s->filter.level) {
                    yoff2 = yoff;
                    uvoff2 = uvoff;
                    lflvl_ptr = s->lflvl;
                    for (col = 0; col < s->cols;
                         col += 8, yoff2 += 64, uvoff2 += 32, lflvl_ptr++) {
                        loopfilter_sb(ctx, lflvl_ptr, row, col, yoff2, uvoff2);
                    }
                }

                // FIXME maybe we can make this more fine-grained by running the
                // loopfilter per-block instead of after each sbrow
                // In fact that would also make intra pred left preparation easier?
                ff_thread_report_progress(&s->frames[CUR_FRAME].tf, row >> 3, 0);
            }
        }

        if (s->pass < 2 && s->refreshctx && !s->parallelmode) {
            adapt_probs(s);
            ff_thread_finish_setup(ctx);
        }
    } while (s->pass++ == 1);
    ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);

    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->refs[i].f->data[0])
            ff_thread_release_buffer(ctx, &s->refs[i]);
        ff_thread_ref_frame(&s->refs[i], &s->next_refs[i]);
    }

    if (!s->invisible) {
        if ((res = av_frame_ref(frame, s->frames[CUR_FRAME].tf.f)) < 0)
            return res;
        *got_frame = 1;
    }

    return pkt->size;
}

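/* Flush (e.g. on seek): drop all internal frames and reference slots. */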
static void vp9_decode_flush(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    int i;

    for (i = 0; i < 3; i++)
        vp9_unref_frame(ctx, &s->frames[i]);
    for (i = 0; i < 8; i++)
        ff_thread_release_buffer(ctx, &s->refs[i]);
}

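/* Allocate the AVFrame shells for the 3 internal frames and the 8 VP9
 * reference slots; the actual frame buffers are allocated per-frame during
 * decoding. */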
static int init_frames(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        s->frames[i].tf.f = av_frame_alloc();
        if (!s->frames[i].tf.f) {
            vp9_decode_free(ctx);
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
            return AVERROR(ENOMEM);
        }
    }
    for (i = 0; i < 8; i++) {
        s->refs[i].f = av_frame_alloc();
        s->next_refs[i].f = av_frame_alloc();
        if (!s->refs[i].f || !s->next_refs[i].f) {
            vp9_decode_free(ctx);
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}

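/* Decoder init; the output format is fixed to 8-bit 4:2:0, which appears
 * to be all this decoder handles (VP9 profile 0). */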
static av_cold int vp9_decode_init(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;

    ctx->internal->allocate_progress = 1;
    ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    ff_vp9dsp_init(&s->dsp);
    ff_videodsp_init(&s->vdsp, 8);
    s->filter.sharpness = -1;

    return init_frames(ctx);
}

static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
{
    return init_frames(avctx);
}

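/*
 * Frame-threading: copy the state the next decoding thread needs from the
 * source context: frames, reference slots, probability contexts, loop
 * filter deltas and segmentation state. Scratch buffers are freed here when
 * the frame size changed in the other thread and are reallocated on the
 * next decode call.
 */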
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, res;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    // detect size changes in other threads
    if (s->intra_pred_data[0] &&
        (!ssrc->intra_pred_data[0] || s->cols != ssrc->cols || s->rows != ssrc->rows)) {
        free_buffers(s);
    }

    for (i = 0; i < 3; i++) {
        if (s->frames[i].tf.f->data[0])
            vp9_unref_frame(dst, &s->frames[i]);
        if (ssrc->frames[i].tf.f->data[0]) {
            if ((res = vp9_ref_frame(dst, &s->frames[i], &ssrc->frames[i])) < 0)
                return res;
        }
    }
    for (i = 0; i < 8; i++) {
        if (s->refs[i].f->data[0])
            ff_thread_release_buffer(dst, &s->refs[i]);
        if (ssrc->next_refs[i].f->data[0]) {
            if ((res = ff_thread_ref_frame(&s->refs[i], &ssrc->next_refs[i])) < 0)
                return res;
        }
    }

    s->invisible = ssrc->invisible;
    s->keyframe = ssrc->keyframe;
    s->segmentation.enabled = ssrc->segmentation.enabled;
    s->segmentation.update_map = ssrc->segmentation.update_map;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->lf_delta, &ssrc->lf_delta, sizeof(s->lf_delta));
    if (ssrc->segmentation.enabled) {
        memcpy(&s->segmentation.feat, &ssrc->segmentation.feat,
               sizeof(s->segmentation.feat));
    }

    return 0;
}

AVCodec ff_vp9_decoder = {
    .name                  = "vp9",
    .long_name             = NULL_IF_CONFIG_SMALL("Google VP9"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_VP9,
    .priv_data_size        = sizeof(VP9Context),
    .init                  = vp9_decode_init,
    .close                 = vp9_decode_free,
    .decode                = vp9_decode_frame,
    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .flush                 = vp9_decode_flush,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
};