/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "thread.h"
#include "videodsp.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dsp.h"
#include "libavutil/avassert.h"

#define VP9_SYNCCODE 0x498342

enum CompPredMode {
    PRED_SINGLEREF,
    PRED_COMPREF,
    PRED_SWITCHABLE,
};

enum BlockLevel {
    BL_64X64,
    BL_32X32,
    BL_16X16,
    BL_8X8,
};

enum BlockSize {
    BS_64x64,
    BS_64x32,
    BS_32x64,
    BS_32x32,
    BS_32x16,
    BS_16x32,
    BS_16x16,
    BS_16x8,
    BS_8x16,
    BS_8x8,
    BS_8x4,
    BS_4x8,
    BS_4x4,
    N_BS_SIZES,
};

struct VP9mvrefPair {
    VP56mv mv[2];
    int8_t ref[2];
};

typedef struct VP9Frame {
    ThreadFrame tf;
    AVBufferRef *extradata;
    uint8_t *segmentation_map;
    struct VP9mvrefPair *mv;
} VP9Frame;

struct VP9Filter {
    uint8_t level[8 * 8];
    uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */]
                              [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */];
};

typedef struct VP9Block {
    uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
    enum FilterMode filter;
    VP56mv mv[4 /* b_idx */][2 /* ref */];
    enum BlockSize bs;
    enum TxfmMode tx, uvtx;
    enum BlockLevel bl;
    enum BlockPartition bp;
} VP9Block;

typedef struct VP9Context {
    VP9DSPContext dsp;
    VideoDSPContext vdsp;
    GetBitContext gb;
    VP56RangeCoder c;
    VP56RangeCoder *c_b;
    unsigned c_b_size;
    VP9Block *b_base, *b;
    int pass, uses_2pass, last_uses_2pass;
    int row, row7, col, col7;
    uint8_t *dst[3];
    ptrdiff_t y_stride, uv_stride;

    // bitstream header
    uint8_t profile;
    uint8_t keyframe, last_keyframe;
    uint8_t invisible;
    uint8_t use_last_frame_mvs;
    uint8_t errorres;
    uint8_t colorspace;
    uint8_t fullrange;
    uint8_t intraonly;
    uint8_t resetctx;
    uint8_t refreshrefmask;
    uint8_t highprecisionmvs;
    enum FilterMode filtermode;
    uint8_t allowcompinter;
    uint8_t fixcompref;
    uint8_t refreshctx;
    uint8_t parallelmode;
    uint8_t framectxid;
    uint8_t refidx[3];
    uint8_t signbias[3];
    uint8_t varcompref[2];
    ThreadFrame refs[8], next_refs[8];
#define CUR_FRAME 0
#define LAST_FRAME 1
    VP9Frame frames[2];

    struct {
        uint8_t level;
        int8_t sharpness;
        uint8_t lim_lut[64];
        uint8_t mblim_lut[64];
    } filter;
    struct {
        uint8_t enabled;
        int8_t mode[2];
        int8_t ref[4];
    } lf_delta;
    uint8_t yac_qi;
    int8_t ydc_qdelta, uvdc_qdelta, uvac_qdelta;
    uint8_t lossless;
    struct {
        uint8_t enabled;
        uint8_t temporal;
        uint8_t absolute_vals;
        uint8_t update_map;
        struct {
            uint8_t q_enabled;
            uint8_t lf_enabled;
            uint8_t ref_enabled;
            uint8_t skip_enabled;
            uint8_t ref_val;
            int16_t q_val;
            int8_t lf_val;
            int16_t qmul[2][2];
            uint8_t lflvl[4][2];
        } feat[8];
    } segmentation;
    struct {
        unsigned log2_tile_cols, log2_tile_rows;
        unsigned tile_cols, tile_rows;
        unsigned tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    } tiling;
    unsigned sb_cols, sb_rows, rows, cols;
    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][3];
    } prob_ctx[4];
    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][11];
        uint8_t seg[7];
        uint8_t segpred[3];
    } prob;
    struct {
        unsigned y_mode[4][10];
        unsigned uv_mode[10][10];
        unsigned filter[4][3];
        unsigned mv_mode[7][4];
        unsigned intra[4][2];
        unsigned comp[5][2];
        unsigned single_ref[5][2][2];
        unsigned comp_ref[5][2];
        unsigned tx32p[2][4];
        unsigned tx16p[2][3];
        unsigned tx8p[2][2];
        unsigned skip[3][2];
        unsigned mv_joint[4];
        struct {
            unsigned sign[2];
            unsigned classes[11];
            unsigned class0[2];
            unsigned bits[10][2];
            unsigned class0_fp[2][4];
            unsigned fp[4];
            unsigned class0_hp[2];
            unsigned hp[2];
        } mv_comp[2];
        unsigned partition[4][4][4];
        unsigned coef[4][2][2][6][6][3];
        unsigned eob[4][2][2][6][6][2];
    } counts;
    enum TxfmMode txfmmode;
    enum CompPredMode comppredmode;

    // contextual (left/above) cache
    DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
    DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
    DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
    DECLARE_ALIGNED(8, uint8_t, left_uv_nnz_ctx)[2][8];
    DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_txfm_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_segpred_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_intra_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_comp_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_ref_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_filter_ctx)[8];
    uint8_t *above_partition_ctx;
    uint8_t *above_mode_ctx;
    // FIXME maybe merge some of the below in a flags field?
    uint8_t *above_y_nnz_ctx;
    uint8_t *above_uv_nnz_ctx[2];
    uint8_t *above_skip_ctx; // 1bit
    uint8_t *above_txfm_ctx; // 2bit
    uint8_t *above_segpred_ctx; // 1bit
    uint8_t *above_intra_ctx; // 1bit
    uint8_t *above_comp_ctx; // 1bit
    uint8_t *above_ref_ctx; // 2bit
    uint8_t *above_filter_ctx;
    VP56mv (*above_mv_ctx)[2];

    // whole-frame cache
    uint8_t *intra_pred_data[3];
    struct VP9Filter *lflvl;
    DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[135*144];

    // block reconstruction intermediates
    int block_alloc_using_2pass;
    int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
    uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
    struct { int x, y; } min_mv, max_mv;
    DECLARE_ALIGNED(32, uint8_t, tmp_y)[64*64];
    DECLARE_ALIGNED(32, uint8_t, tmp_uv)[2][32*32];
    uint16_t mvscale[3][2];
    uint8_t mvstep[3][2];
} VP9Context;
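
// Per-block-size width/height lookup: row 0 is in units of 4x4 subblocks,
// row 1 in units of 8x8 blocks (e.g. BS_64x64 maps to { 16, 16 } and
// { 8, 8 } respectively).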
static const uint8_t bwh_tab[2][N_BS_SIZES][2] = {
    {
        { 16, 16 }, { 16, 8 }, { 8, 16 }, { 8, 8 }, { 8, 4 }, { 4, 8 },
        { 4, 4 }, { 4, 2 }, { 2, 4 }, { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 },
    }, {
        { 8, 8 }, { 8, 4 }, { 4, 8 }, { 4, 4 }, { 4, 2 }, { 2, 4 },
        { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
    }
};

static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)
{
    VP9Context *s = ctx->priv_data;
    int ret, sz;

    if ((ret = ff_thread_get_buffer(ctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    sz = 64 * s->sb_cols * s->sb_rows;
    if (!(f->extradata = av_buffer_allocz(sz * (1 + sizeof(struct VP9mvrefPair))))) {
        ff_thread_release_buffer(ctx, &f->tf);
        return AVERROR(ENOMEM);
    }
    f->segmentation_map = f->extradata->data;
    f->mv = (struct VP9mvrefPair *) (f->extradata->data + sz);

    // retain segmentation map if it doesn't update
    if (s->segmentation.enabled && !s->segmentation.update_map &&
        !s->intraonly && !s->keyframe && !s->errorres &&
        ctx->active_thread_type != FF_THREAD_FRAME) {
        memcpy(f->segmentation_map, s->frames[LAST_FRAME].segmentation_map, sz);
    }

    return 0;
}

static void vp9_unref_frame(AVCodecContext *ctx, VP9Frame *f)
{
    ff_thread_release_buffer(ctx, &f->tf);
    av_buffer_unref(&f->extradata);
}

static int vp9_ref_frame(AVCodecContext *ctx, VP9Frame *dst, VP9Frame *src)
{
    int res;

    if ((res = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0) {
        return res;
    } else if (!(dst->extradata = av_buffer_ref(src->extradata))) {
        vp9_unref_frame(ctx, dst);
        return AVERROR(ENOMEM);
    }
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;

    return 0;
}

static int update_size(AVCodecContext *ctx, int w, int h)
{
    VP9Context *s = ctx->priv_data;
    uint8_t *p;

    av_assert0(w > 0 && h > 0);

    if (s->intra_pred_data[0] && w == ctx->width && h == ctx->height)
        return 0;

    ctx->width = w;
    ctx->height = h;
    s->sb_cols = (w + 63) >> 6;
    s->sb_rows = (h + 63) >> 6;
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;

#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    p = av_malloc(s->sb_cols * (240 + sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0],  uint8_t *,          64);
    assign(s->intra_pred_data[1],  uint8_t *,          32);
    assign(s->intra_pred_data[2],  uint8_t *,          32);
    assign(s->above_y_nnz_ctx,     uint8_t *,          16);
    assign(s->above_mode_ctx,      uint8_t *,          16);
    assign(s->above_mv_ctx,        VP56mv(*)[2],       16);
    assign(s->above_partition_ctx, uint8_t *,           8);
    assign(s->above_skip_ctx,      uint8_t *,           8);
    assign(s->above_txfm_ctx,      uint8_t *,           8);
    assign(s->above_uv_nnz_ctx[0], uint8_t *,           8);
    assign(s->above_uv_nnz_ctx[1], uint8_t *,           8);
    assign(s->above_segpred_ctx,   uint8_t *,           8);
    assign(s->above_intra_ctx,     uint8_t *,           8);
    assign(s->above_comp_ctx,      uint8_t *,           8);
    assign(s->above_ref_ctx,       uint8_t *,           8);
    assign(s->above_filter_ctx,    uint8_t *,           8);
    assign(s->lflvl,               struct VP9Filter *,  1);
#undef assign

    // these will be re-allocated a little later
    av_freep(&s->b_base);
    av_freep(&s->block_base);

    return 0;
}

static int update_block_buffers(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;

    if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->uses_2pass)
        return 0;

    av_free(s->b_base);
    av_free(s->block_base);
    if (s->uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        s->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        s->block_base = av_mallocz((64 * 64 + 128) * sbs * 3);
        if (!s->b_base || !s->block_base)
            return AVERROR(ENOMEM);
        s->uvblock_base[0] = s->block_base + sbs * 64 * 64;
        s->uvblock_base[1] = s->uvblock_base[0] + sbs * 32 * 32;
        s->eob_base = (uint8_t *) (s->uvblock_base[1] + sbs * 32 * 32);
        s->uveob_base[0] = s->eob_base + 256 * sbs;
        s->uveob_base[1] = s->uveob_base[0] + 64 * sbs;
    } else {
        s->b_base = av_malloc(sizeof(VP9Block));
        s->block_base = av_mallocz((64 * 64 + 128) * 3);
        if (!s->b_base || !s->block_base)
            return AVERROR(ENOMEM);
        s->uvblock_base[0] = s->block_base + 64 * 64;
        s->uvblock_base[1] = s->uvblock_base[0] + 32 * 32;
        s->eob_base = (uint8_t *) (s->uvblock_base[1] + 32 * 32);
        s->uveob_base[0] = s->eob_base + 256;
        s->uveob_base[1] = s->uveob_base[0] + 64;
    }
    s->block_alloc_using_2pass = s->uses_2pass;

    return 0;
}

// for some reason the sign bit is at the end, not the start, of a bit sequence
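// (e.g. for n = 4, the bits 0101 followed by a set sign bit decode to -5)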
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}

static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    return v > 2 * m ? v : v & 1 ? m - ((v + 1) >> 1) : m + (v >> 1);
}

// differential forward probability updates
static int update_prob(VP56RangeCoder *c, int p)
{
    static const int inv_map_table[255] = {
        7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
        25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
        40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
        55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
        70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
        86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A,255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0,254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */
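    /* Worked example: reading d = 0 selects inv_map_table[0] = 7, one of
     * the 20 'cheap, rough' entries; with a current probability p = 128
     * this gives 1 + inv_recenter_nonneg(7, 127) = 1 + (127 - 4) = 124,
     * a small step away from A, while larger d values walk further out. */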
    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}

static int decode_frame_header(AVCodecContext *ctx,
                               const uint8_t *data, int size, int *ref)
{
    VP9Context *s = ctx->priv_data;
    int c, i, j, k, l, m, n, w, h, max, size2, res, sharp;
    int last_invisible;
    const uint8_t *data2;

    /* general header */
    if ((res = init_get_bits8(&s->gb, data, size)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
        return res;
    }
    if (get_bits(&s->gb, 2) != 0x2) { // frame marker
        av_log(ctx, AV_LOG_ERROR, "Invalid frame marker\n");
        return AVERROR_INVALIDDATA;
    }
    s->profile = get_bits1(&s->gb);
    if (get_bits1(&s->gb)) { // reserved bit
        av_log(ctx, AV_LOG_ERROR, "Reserved bit should be zero\n");
        return AVERROR_INVALIDDATA;
    }
    if (get_bits1(&s->gb)) {
        *ref = get_bits(&s->gb, 3);
        return 0;
    }
    s->last_uses_2pass = s->uses_2pass;
    s->last_keyframe = s->keyframe;
    s->keyframe = !get_bits1(&s->gb);
    last_invisible = s->invisible;
    s->invisible = !get_bits1(&s->gb);
    s->errorres = get_bits1(&s->gb);
    s->use_last_frame_mvs = !s->errorres && !last_invisible;
    if (s->keyframe) {
        if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
            av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
            return AVERROR_INVALIDDATA;
        }
        s->colorspace = get_bits(&s->gb, 3);
        if (s->colorspace == 7) { // RGB = profile 1
            av_log(ctx, AV_LOG_ERROR, "RGB not supported in profile 0\n");
            return AVERROR_INVALIDDATA;
        }
        s->fullrange = get_bits1(&s->gb);
        // for profile 1, here follows the subsampling bits
        s->refreshrefmask = 0xff;
        w = get_bits(&s->gb, 16) + 1;
        h = get_bits(&s->gb, 16) + 1;
        if (get_bits1(&s->gb)) // display size
            skip_bits(&s->gb, 32);
    } else {
        s->intraonly = s->invisible ? get_bits1(&s->gb) : 0;
        s->resetctx = s->errorres ? 0 : get_bits(&s->gb, 2);
        if (s->intraonly) {
            if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
                av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
                return AVERROR_INVALIDDATA;
            }
            s->refreshrefmask = get_bits(&s->gb, 8);
            w = get_bits(&s->gb, 16) + 1;
            h = get_bits(&s->gb, 16) + 1;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
        } else {
            s->refreshrefmask = get_bits(&s->gb, 8);
            s->refidx[0] = get_bits(&s->gb, 3);
            s->signbias[0] = get_bits1(&s->gb);
            s->refidx[1] = get_bits(&s->gb, 3);
            s->signbias[1] = get_bits1(&s->gb);
            s->refidx[2] = get_bits(&s->gb, 3);
            s->signbias[2] = get_bits1(&s->gb);
            if (!s->refs[s->refidx[0]].f->data[0] ||
                !s->refs[s->refidx[1]].f->data[0] ||
                !s->refs[s->refidx[2]].f->data[0]) {
                av_log(ctx, AV_LOG_ERROR, "Not all references are available\n");
                return AVERROR_INVALIDDATA;
            }
            if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[0]].f->width;
                h = s->refs[s->refidx[0]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[1]].f->width;
                h = s->refs[s->refidx[1]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[2]].f->width;
                h = s->refs[s->refidx[2]].f->height;
            } else {
                w = get_bits(&s->gb, 16) + 1;
                h = get_bits(&s->gb, 16) + 1;
            }
            // Note that in this code, "CUR_FRAME" is actually before we
            // have formally allocated a frame, and thus actually represents
            // the _last_ frame
            s->use_last_frame_mvs &= s->frames[CUR_FRAME].tf.f->width == w &&
                                     s->frames[CUR_FRAME].tf.f->height == h;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
            s->highprecisionmvs = get_bits1(&s->gb);
            s->filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
                                                get_bits(&s->gb, 2);
            s->allowcompinter = s->signbias[0] != s->signbias[1] ||
                                s->signbias[0] != s->signbias[2];
            if (s->allowcompinter) {
                if (s->signbias[0] == s->signbias[1]) {
                    s->fixcompref = 2;
                    s->varcompref[0] = 0;
                    s->varcompref[1] = 1;
                } else if (s->signbias[0] == s->signbias[2]) {
                    s->fixcompref = 1;
                    s->varcompref[0] = 0;
                    s->varcompref[1] = 2;
                } else {
                    s->fixcompref = 0;
                    s->varcompref[0] = 1;
                    s->varcompref[1] = 2;
                }
            }

            for (i = 0; i < 3; i++) {
                AVFrame *ref = s->refs[s->refidx[i]].f;
                int refw = ref->width, refh = ref->height;

                if (refw == w && refh == h) {
                    s->mvscale[i][0] = s->mvscale[i][1] = 0;
                } else {
                    if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
                        av_log(ctx, AV_LOG_ERROR,
                               "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
                               refw, refh, w, h);
                        return AVERROR_INVALIDDATA;
                    }
                    s->mvscale[i][0] = (refw << 14) / w;
                    s->mvscale[i][1] = (refh << 14) / h;
                    s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
                    s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
                }
            }
        }
    }
    s->refreshctx = s->errorres ? 0 : get_bits1(&s->gb);
    s->parallelmode = s->errorres ? 1 : get_bits1(&s->gb);
    s->framectxid = c = get_bits(&s->gb, 2);

    /* loopfilter header data */
    s->filter.level = get_bits(&s->gb, 6);
    sharp = get_bits(&s->gb, 3);
    // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
    // the old cache values since they are still valid
    if (s->filter.sharpness != sharp)
        memset(s->filter.lim_lut, 0, sizeof(s->filter.lim_lut));
    s->filter.sharpness = sharp;
    if ((s->lf_delta.enabled = get_bits1(&s->gb))) {
        if (get_bits1(&s->gb)) {
            for (i = 0; i < 4; i++)
                if (get_bits1(&s->gb))
                    s->lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
            for (i = 0; i < 2; i++)
                if (get_bits1(&s->gb))
                    s->lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
        }
    } else {
        memset(&s->lf_delta, 0, sizeof(s->lf_delta));
    }

    /* quantization header data */
    s->yac_qi = get_bits(&s->gb, 8);
    s->ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->lossless = s->yac_qi == 0 && s->ydc_qdelta == 0 &&
                  s->uvdc_qdelta == 0 && s->uvac_qdelta == 0;

    /* segmentation header info */
    if ((s->segmentation.enabled = get_bits1(&s->gb))) {
        if ((s->segmentation.update_map = get_bits1(&s->gb))) {
            for (i = 0; i < 7; i++)
                s->prob.seg[i] = get_bits1(&s->gb) ?
                                 get_bits(&s->gb, 8) : 255;
            if ((s->segmentation.temporal = get_bits1(&s->gb))) {
                for (i = 0; i < 3; i++)
                    s->prob.segpred[i] = get_bits1(&s->gb) ?
                                         get_bits(&s->gb, 8) : 255;
            }
        }
        if ((!s->segmentation.update_map || s->segmentation.temporal) &&
            (w != s->frames[CUR_FRAME].tf.f->width ||
             h != s->frames[CUR_FRAME].tf.f->height)) {
            av_log(ctx, AV_LOG_ERROR,
                   "Reference segmap (temp=%d,update=%d) enabled on size-change!\n",
                   s->segmentation.temporal, s->segmentation.update_map);
            return AVERROR_INVALIDDATA;
        }

        if (get_bits1(&s->gb)) {
            s->segmentation.absolute_vals = get_bits1(&s->gb);
            for (i = 0; i < 8; i++) {
                if ((s->segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
                if ((s->segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
                if ((s->segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
                s->segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
            }
        }
    } else {
        s->segmentation.feat[0].q_enabled = 0;
        s->segmentation.feat[0].lf_enabled = 0;
        s->segmentation.feat[0].skip_enabled = 0;
        s->segmentation.feat[0].ref_enabled = 0;
    }

    // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
    for (i = 0; i < (s->segmentation.enabled ? 8 : 1); i++) {
        int qyac, qydc, quvac, quvdc, lflvl, sh;

        if (s->segmentation.feat[i].q_enabled) {
            if (s->segmentation.absolute_vals)
                qyac = s->segmentation.feat[i].q_val;
            else
                qyac = s->yac_qi + s->segmentation.feat[i].q_val;
        } else {
            qyac = s->yac_qi;
        }
        qydc = av_clip_uintp2(qyac + s->ydc_qdelta, 8);
        quvdc = av_clip_uintp2(qyac + s->uvdc_qdelta, 8);
        quvac = av_clip_uintp2(qyac + s->uvac_qdelta, 8);
        qyac = av_clip_uintp2(qyac, 8);

        s->segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[qydc];
        s->segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[qyac];
        s->segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[quvdc];
        s->segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[quvac];

        sh = s->filter.level >= 32;
        if (s->segmentation.feat[i].lf_enabled) {
            if (s->segmentation.absolute_vals)
                lflvl = s->segmentation.feat[i].lf_val;
            else
                lflvl = s->filter.level + s->segmentation.feat[i].lf_val;
        } else {
            lflvl = s->filter.level;
        }
        s->segmentation.feat[i].lflvl[0][0] =
        s->segmentation.feat[i].lflvl[0][1] =
            av_clip_uintp2(lflvl + (s->lf_delta.ref[0] << sh), 6);
        for (j = 1; j < 4; j++) {
            s->segmentation.feat[i].lflvl[j][0] =
                av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
                                         s->lf_delta.mode[0]) << sh), 6);
            s->segmentation.feat[i].lflvl[j][1] =
                av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
                                         s->lf_delta.mode[1]) << sh), 6);
        }
    }

    /* tiling info */
    if ((res = update_size(ctx, w, h)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d\n", w, h);
        return res;
    }
    for (s->tiling.log2_tile_cols = 0;
         (s->sb_cols >> s->tiling.log2_tile_cols) > 64;
         s->tiling.log2_tile_cols++) ;
    for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
    max = FFMAX(0, max - 1);
    while (max > s->tiling.log2_tile_cols) {
        if (get_bits1(&s->gb))
            s->tiling.log2_tile_cols++;
        else
            break;
    }
    s->tiling.log2_tile_rows = decode012(&s->gb);
    s->tiling.tile_rows = 1 << s->tiling.log2_tile_rows;
    if (s->tiling.tile_cols != (1 << s->tiling.log2_tile_cols)) {
        s->tiling.tile_cols = 1 << s->tiling.log2_tile_cols;
        s->c_b = av_fast_realloc(s->c_b, &s->c_b_size,
                                 sizeof(VP56RangeCoder) * s->tiling.tile_cols);
        if (!s->c_b) {
            av_log(ctx, AV_LOG_ERROR, "Ran out of memory during range coder init\n");
            return AVERROR(ENOMEM);
        }
    }

    if (s->keyframe || s->errorres || s->intraonly) {
        s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
                           s->prob_ctx[3].p = vp9_default_probs;
        memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[1].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[2].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
    }

    // next 16 bits is size of the rest of the header (arith-coded)
    size2 = get_bits(&s->gb, 16);
    data2 = align_get_bits(&s->gb);
    if (size2 > size - (data2 - data)) {
        av_log(ctx, AV_LOG_ERROR, "Invalid compressed header size\n");
        return AVERROR_INVALIDDATA;
    }
    ff_vp56_init_range_decoder(&s->c, data2, size2);
    if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
        av_log(ctx, AV_LOG_ERROR, "Marker bit was set\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->keyframe || s->intraonly) {
        memset(s->counts.coef, 0, sizeof(s->counts.coef) + sizeof(s->counts.eob));
    } else {
        memset(&s->counts, 0, sizeof(s->counts));
    }
    // FIXME is it faster to not copy here, but do it down in the fw updates
    // as explicit copies if the fw update is missing (and skip the copy upon
    // fw update)?
    s->prob.p = s->prob_ctx[c].p;

    // txfm updates
    if (s->lossless) {
        s->txfmmode = TX_4X4;
    } else {
        s->txfmmode = vp8_rac_get_uint(&s->c, 2);
        if (s->txfmmode == 3)
            s->txfmmode += vp8_rac_get(&s->c);

        if (s->txfmmode == TX_SWITCHABLE) {
            for (i = 0; i < 2; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx16p[i][j] =
                            update_prob(&s->c, s->prob.p.tx16p[i][j]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 3; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx32p[i][j] =
                            update_prob(&s->c, s->prob.p.tx32p[i][j]);
        }
    }

    // coef updates
    for (i = 0; i < 4; i++) {
        uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
        if (vp8_rac_get(&s->c)) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m >= 3 && l == 0) // dc only has 3 pt
                                break;
                            for (n = 0; n < 3; n++) {
                                if (vp56_rac_get_prob_branchy(&s->c, 252)) {
                                    p[n] = update_prob(&s->c, r[n]);
                                } else {
                                    p[n] = r[n];
                                }
                            }
                            p[3] = 0;
                        }
        } else {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m > 3 && l == 0) // dc only has 3 pt
                                break;
                            memcpy(p, r, 3);
                            p[3] = 0;
                        }
        }
        if (s->txfmmode == i)
            break;
    }

    // mode updates
    for (i = 0; i < 3; i++)
        if (vp56_rac_get_prob_branchy(&s->c, 252))
            s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
    if (!s->keyframe && !s->intraonly) {
        for (i = 0; i < 7; i++)
            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_mode[i][j] =
                        update_prob(&s->c, s->prob.p.mv_mode[i][j]);

        if (s->filtermode == FILTER_SWITCHABLE)
            for (i = 0; i < 4; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.filter[i][j] =
                            update_prob(&s->c, s->prob.p.filter[i][j]);

        for (i = 0; i < 4; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);

        if (s->allowcompinter) {
            s->comppredmode = vp8_rac_get(&s->c);
            if (s->comppredmode)
                s->comppredmode += vp8_rac_get(&s->c);
            if (s->comppredmode == PRED_SWITCHABLE)
                for (i = 0; i < 5; i++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.comp[i] =
                            update_prob(&s->c, s->prob.p.comp[i]);
        } else {
            s->comppredmode = PRED_SINGLEREF;
        }

        if (s->comppredmode != PRED_COMPREF) {
            for (i = 0; i < 5; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][0] =
                        update_prob(&s->c, s->prob.p.single_ref[i][0]);
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][1] =
                        update_prob(&s->c, s->prob.p.single_ref[i][1]);
            }
        }

        if (s->comppredmode != PRED_SINGLEREF) {
            for (i = 0; i < 5; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.comp_ref[i] =
                        update_prob(&s->c, s->prob.p.comp_ref[i]);
        }

        for (i = 0; i < 4; i++)
            for (j = 0; j < 9; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.y_mode[i][j] =
                        update_prob(&s->c, s->prob.p.y_mode[i][j]);

        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.partition[3 - i][j][k] =
                            update_prob(&s->c, s->prob.p.partition[3 - i][j][k]);

        // mv fields don't use the update_prob subexp model for some reason
        for (i = 0; i < 3; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

        for (i = 0; i < 2; i++) {
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].sign = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].classes[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].class0 = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].bits[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.mv_comp[i].class0_fp[j][k] =
                            (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].fp[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        if (s->highprecisionmvs) {
            for (i = 0; i < 2; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].class0_hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
            }
        }
    }

    return (data2 - data) + size2;
}

static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
                                      VP9Context *s)
{
    dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
    dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
}

static void find_ref_mvs(VP9Context *s,
                         VP56mv *pmv, int ref, int z, int idx, int sb)
{
    static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
        [BS_64x64] = {{  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
                      { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 }},
        [BS_64x32] = {{  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 }},
        [BS_32x64] = {{ -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
                      { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 }},
        [BS_32x32] = {{  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_32x16] = {{  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
                      { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x32] = {{ -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
                      {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 }},
        [BS_16x16] = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x8]  = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
                      {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 }},
        [BS_8x16]  = {{ -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
                      { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 }},
        [BS_8x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_8x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
    };
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
#define INVALID_MV 0x80008000U
    uint32_t mem = INVALID_MV;
    int i;

#define RETURN_DIRECT_MV(mv) \
    do { \
        uint32_t m = AV_RN32A(&mv); \
        if (!idx) { \
            AV_WN32A(pmv, m); \
            return; \
        } else if (mem == INVALID_MV) { \
            mem = m; \
        } else if (m != mem) { \
            AV_WN32A(pmv, m); \
            return; \
        } \
    } while (0)

    if (sb >= 0) {
        if (sb == 2 || sb == 1) {
            RETURN_DIRECT_MV(b->mv[0][z]);
        } else if (sb == 3) {
            RETURN_DIRECT_MV(b->mv[2][z]);
            RETURN_DIRECT_MV(b->mv[1][z]);
            RETURN_DIRECT_MV(b->mv[0][z]);
        }

#define RETURN_MV(mv) \
    do { \
        if (sb > 0) { \
            VP56mv tmp; \
            uint32_t m; \
            clamp_mv(&tmp, &mv, s); \
            m = AV_RN32A(&tmp); \
            if (!idx) { \
                AV_WN32A(pmv, m); \
                return; \
            } else if (mem == INVALID_MV) { \
                mem = m; \
            } else if (m != mem) { \
                AV_WN32A(pmv, m); \
                return; \
            } \
        } else { \
            uint32_t m = AV_RN32A(&mv); \
            if (!idx) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } else if (mem == INVALID_MV) { \
                mem = m; \
            } else if (m != mem) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } \
        } \
    } while (0)

        if (row > 0) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
            }
        }
        if (col > s->tiling.tile_col_start) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
            }
        }
        i = 2;
    } else {
        i = 0;
    }

    // previously coded MVs in this neighbourhood, using same reference frame
    for (; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] == ref) {
                RETURN_MV(mv->mv[0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(mv->mv[1]);
            }
        }
    }

    // MV at this position in previous frame, using same reference frame
    if (s->use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->frames[LAST_FRAME].mv[row * s->sb_cols * 8 + col];

        if (!s->last_uses_2pass)
            ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0);
        if (mv->ref[0] == ref) {
            RETURN_MV(mv->mv[0]);
        } else if (mv->ref[1] == ref) {
            RETURN_MV(mv->mv[1]);
        }
    }

#define RETURN_SCALE_MV(mv, scale) \
    do { \
        if (scale) { \
            VP56mv mv_temp = { -mv.x, -mv.y }; \
            RETURN_MV(mv_temp); \
        } else { \
            RETURN_MV(mv); \
        } \
    } while (0)

    // previously coded MVs in this neighbourhood, using different reference frame
    for (i = 0; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] != ref && mv->ref[0] >= 0) {
                RETURN_SCALE_MV(mv->mv[0], s->signbias[mv->ref[0]] != s->signbias[ref]);
            }
            if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                // BUG - libvpx has this condition regardless of whether
                // we used the first ref MV and pre-scaling
                AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
                RETURN_SCALE_MV(mv->mv[1], s->signbias[mv->ref[1]] != s->signbias[ref]);
            }
        }
    }

    // MV at this position in previous frame, using different reference frame
    if (s->use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->frames[LAST_FRAME].mv[row * s->sb_cols * 8 + col];

        // no need to await_progress, because we already did that above
        if (mv->ref[0] != ref && mv->ref[0] >= 0) {
            RETURN_SCALE_MV(mv->mv[0], s->signbias[mv->ref[0]] != s->signbias[ref]);
        }
        if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
            // BUG - libvpx has this condition regardless of whether
            // we used the first ref MV and pre-scaling
            AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
            RETURN_SCALE_MV(mv->mv[1], s->signbias[mv->ref[1]] != s->signbias[ref]);
        }
    }

    AV_ZERO32(pmv);

#undef INVALID_MV
#undef RETURN_MV
#undef RETURN_SCALE_MV
}
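
// Decode one motion-vector component: sign, magnitude class, then either
// the class-0 fractional/high-precision bits or the per-class integer,
// fractional and high-precision bits, updating the entropy counts as it goes.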
static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
{
    int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
    int n, c = vp8_rac_get_tree(&s->c, vp9_mv_class_tree,
                                s->prob.p.mv_comp[idx].classes);

    s->counts.mv_comp[idx].sign[sign]++;
    s->counts.mv_comp[idx].classes[c]++;
    if (c) {
        int m;

        for (n = 0, m = 0; m < c; m++) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
            n |= bit << m;
            s->counts.mv_comp[idx].bits[m][bit]++;
        }
        n <<= 3;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree, s->prob.p.mv_comp[idx].fp);
        n |= bit << 1;
        s->counts.mv_comp[idx].fp[bit]++;
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
            s->counts.mv_comp[idx].hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].hp[1]++;
        }
        n += 8 << c;
    } else {
        n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
        s->counts.mv_comp[idx].class0[n]++;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree,
                               s->prob.p.mv_comp[idx].class0_fp[n]);
        s->counts.mv_comp[idx].class0_fp[n][bit]++;
        n = (n << 3) | (bit << 1);
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
            s->counts.mv_comp[idx].class0_hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].class0_hp[1]++;
        }
    }

    return sign ? -(n + 1) : (n + 1);
}
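
// Fill in the up-to-two motion vectors for a block: predict each one via
// find_ref_mvs(), drop the high-precision (odd) bit when high-precision MVs
// are disabled or the vector is too large, and add a coded delta for NEWMV.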
static void fill_mv(VP9Context *s,
                    VP56mv *mv, int mode, int sb)
{
    VP9Block *b = s->b;

    if (mode == ZEROMV) {
        AV_ZERO64(mv);
    } else {
        int hp;

        // FIXME cache this value and reuse for other subblocks
        find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
                     mode == NEWMV ? -1 : sb);
        // FIXME maybe move this code into find_ref_mvs()
        if ((mode == NEWMV || sb == -1) &&
            !(hp = s->highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
            if (mv[0].y & 1) {
                if (mv[0].y < 0)
                    mv[0].y++;
                else
                    mv[0].y--;
            }
            if (mv[0].x & 1) {
                if (mv[0].x < 0)
                    mv[0].x++;
                else
                    mv[0].x--;
            }
        }
        if (mode == NEWMV) {
            enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                              s->prob.p.mv_joint);

            s->counts.mv_joint[j]++;
            if (j >= MV_JOINT_V)
                mv[0].y += read_mv_component(s, 0, hp);
            if (j & 1)
                mv[0].x += read_mv_component(s, 1, hp);
        }

        if (b->comp) {
            // FIXME cache this value and reuse for other subblocks
            find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
                         mode == NEWMV ? -1 : sb);
            if ((mode == NEWMV || sb == -1) &&
                !(hp = s->highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
                if (mv[1].y & 1) {
                    if (mv[1].y < 0)
                        mv[1].y++;
                    else
                        mv[1].y--;
                }
                if (mv[1].x & 1) {
                    if (mv[1].x < 0)
                        mv[1].x++;
                    else
                        mv[1].x--;
                }
            }
            if (mode == NEWMV) {
                enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                                  s->prob.p.mv_joint);

                s->counts.mv_joint[j]++;
                if (j >= MV_JOINT_V)
                    mv[1].y += read_mv_component(s, 0, hp);
                if (j & 1)
                    mv[1].x += read_mv_component(s, 1, hp);
            }
        }
    }
}
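
// Splat value v over a w x h byte region (w = 1, 2, 4 or 8), using one
// aligned store per row where the target platform allows it.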
static av_always_inline void setctx_2d(uint8_t *ptr, int w, int h,
                                       ptrdiff_t stride, int v)
{
    switch (w) {
    case 1:
        do {
            *ptr = v;
            ptr += stride;
        } while (--h);
        break;
    case 2: {
        int v16 = v * 0x0101;
        do {
            AV_WN16A(ptr, v16);
            ptr += stride;
        } while (--h);
        break;
    }
    case 4: {
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr, v32);
            ptr += stride;
        } while (--h);
        break;
    }
    case 8: {
#if HAVE_FAST_64BIT
        uint64_t v64 = v * 0x0101010101010101ULL;
        do {
            AV_WN64A(ptr, v64);
            ptr += stride;
        } while (--h);
#else
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr, v32);
            AV_WN32A(ptr + 4, v32);
            ptr += stride;
        } while (--h);
#endif
        break;
    }
    }
}

static void decode_mode(AVCodecContext *ctx)
{
    static const uint8_t left_ctx[N_BS_SIZES] = {
        0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
    };
    static const uint8_t above_ctx[N_BS_SIZES] = {
        0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
    };
    static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = {
        TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
        TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
    };
    VP9Context *s = ctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
    int w4 = FFMIN(s->cols - col, bwh_tab[1][b->bs][0]);
    int h4 = FFMIN(s->rows - row, bwh_tab[1][b->bs][1]), y;
    int have_a = row > 0, have_l = col > s->tiling.tile_col_start;
    int vref, filter_id;

    if (!s->segmentation.enabled) {
        b->seg_id = 0;
    } else if (s->keyframe || s->intraonly) {
        b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg);
    } else if (!s->segmentation.update_map ||
               (s->segmentation.temporal &&
                vp56_rac_get_prob_branchy(&s->c,
                    s->prob.segpred[s->above_segpred_ctx[col] +
                                    s->left_segpred_ctx[row7]]))) {
        if (!s->errorres) {
            int pred = 8, x;
            uint8_t *refsegmap = s->frames[LAST_FRAME].segmentation_map;

            if (!s->last_uses_2pass)
                ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0);
            for (y = 0; y < h4; y++) {
                int idx_base = (y + row) * 8 * s->sb_cols + col;
                for (x = 0; x < w4; x++)
                    pred = FFMIN(pred, refsegmap[idx_base + x]);
                if (!s->segmentation.update_map && ctx->active_thread_type == FF_THREAD_FRAME) {
                    // FIXME maybe retain reference to previous frame as
                    // segmap reference instead of copying the whole map
                    // into a new buffer
                    memcpy(&s->frames[CUR_FRAME].segmentation_map[idx_base],
                           &refsegmap[idx_base], w4);
                }
            }
            av_assert1(pred < 8);
            b->seg_id = pred;
        } else {
            b->seg_id = 0;
        }

        memset(&s->above_segpred_ctx[col], 1, w4);
        memset(&s->left_segpred_ctx[row7], 1, h4);
    } else {
        b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree,
                                     s->prob.seg);

        memset(&s->above_segpred_ctx[col], 0, w4);
        memset(&s->left_segpred_ctx[row7], 0, h4);
    }
    if (s->segmentation.enabled &&
        (s->segmentation.update_map || s->keyframe || s->intraonly)) {
        setctx_2d(&s->frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col],
                  w4, h4, 8 * s->sb_cols, b->seg_id);
    }

    b->skip = s->segmentation.enabled &&
              s->segmentation.feat[b->seg_id].skip_enabled;
    if (!b->skip) {
        int c = s->left_skip_ctx[row7] + s->above_skip_ctx[col];
        b->skip = vp56_rac_get_prob(&s->c, s->prob.p.skip[c]);
        s->counts.skip[c][b->skip]++;
    }

    if (s->keyframe || s->intraonly) {
        b->intra = 1;
    } else if (s->segmentation.feat[b->seg_id].ref_enabled) {
        b->intra = !s->segmentation.feat[b->seg_id].ref_val;
    } else {
        int c, bit;

        if (have_a && have_l) {
            c = s->above_intra_ctx[col] + s->left_intra_ctx[row7];
            c += (c == 2);
        } else {
            c = have_a ? 2 * s->above_intra_ctx[col] :
                have_l ? 2 * s->left_intra_ctx[row7] : 0;
        }
        bit = vp56_rac_get_prob(&s->c, s->prob.p.intra[c]);
        s->counts.intra[c][bit]++;
        b->intra = !bit;
    }

    if ((b->intra || !b->skip) && s->txfmmode == TX_SWITCHABLE) {
        int c;
        if (have_a) {
            if (have_l) {
                c = (s->above_skip_ctx[col] ? max_tx :
                     s->above_txfm_ctx[col]) +
                    (s->left_skip_ctx[row7] ? max_tx :
                     s->left_txfm_ctx[row7]) > max_tx;
            } else {
                c = s->above_skip_ctx[col] ? 1 :
                    (s->above_txfm_ctx[col] * 2 > max_tx);
            }
        } else if (have_l) {
            c = s->left_skip_ctx[row7] ? 1 :
                (s->left_txfm_ctx[row7] * 2 > max_tx);
        } else {
            c = 1;
        }
        switch (max_tx) {
        case TX_32X32:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][0]);
            if (b->tx) {
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][1]);
                if (b->tx == 2)
                    b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][2]);
            }
            s->counts.tx32p[c][b->tx]++;
            break;
        case TX_16X16:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][0]);
            if (b->tx)
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][1]);
            s->counts.tx16p[c][b->tx]++;
            break;
        case TX_8X8:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx8p[c]);
            s->counts.tx8p[c][b->tx]++;
            break;
        case TX_4X4:
            b->tx = TX_4X4;
            break;
        }
    } else {
        b->tx = FFMIN(max_tx, s->txfmmode);
    }

    if (s->keyframe || s->intraonly) {
        uint8_t *a = &s->above_mode_ctx[col * 2];
        uint8_t *l = &s->left_mode_ctx[(row7) << 1];

        b->comp = 0;
        if (b->bs > BS_8x8) {
            // FIXME the memory storage intermediates here aren't really
            // necessary, they're just there to make the code slightly
            // simpler for now
            b->mode[0] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                 vp9_default_kf_ymode_probs[a[0]][l[0]]);
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
                l[0] = a[1] = b->mode[1];
            } else {
                l[0] = a[1] = b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                     vp9_default_kf_ymode_probs[a[0]][l[1]]);
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                  vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
                    l[1] = a[1] = b->mode[3];
                } else {
                    l[1] = a[1] = b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                l[1] = a[1] = b->mode[3] = b->mode[1];
            }
        } else {
            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          vp9_default_kf_ymode_probs[*a][*l]);
            b->mode[3] = b->mode[2] = b->mode[1] = b->mode[0];
            // FIXME this can probably be optimized
            memset(a, b->mode[0], bwh_tab[0][b->bs][0]);
            memset(l, b->mode[0], bwh_tab[0][b->bs][1]);
        }
        b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                     vp9_default_kf_uvmode_probs[b->mode[3]]);
    } else if (b->intra) {
        b->comp = 0;
        if (b->bs > BS_8x8) {
            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          s->prob.p.y_mode[0]);
            s->counts.y_mode[0][b->mode[0]]++;
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[1]]++;
            } else {
                b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[2]]++;
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                  s->prob.p.y_mode[0]);
                    s->counts.y_mode[0][b->mode[3]]++;
                } else {
                    b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                b->mode[3] = b->mode[1];
            }
        } else {
            static const uint8_t size_group[10] = {
                3, 3, 3, 3, 2, 2, 2, 1, 1, 1
            };
            int sz = size_group[b->bs];

            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          s->prob.p.y_mode[sz]);
            b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
            s->counts.y_mode[sz][b->mode[3]]++;
        }
        b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                     s->prob.p.uv_mode[b->mode[3]]);
        s->counts.uv_mode[b->mode[3]][b->uvmode]++;
    } else {
        static const uint8_t inter_mode_ctx_lut[14][14] = {
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
        };

        if (s->segmentation.feat[b->seg_id].ref_enabled) {
            av_assert2(s->segmentation.feat[b->seg_id].ref_val != 0);
            b->comp = 0;
            b->ref[0] = s->segmentation.feat[b->seg_id].ref_val - 1;
        } else {
            // read comp_pred flag
            if (s->comppredmode != PRED_SWITCHABLE) {
                b->comp = s->comppredmode == PRED_COMPREF;
            } else {
                int c;

                // FIXME add intra as ref=0xff (or -1) to make these easier?
                if (have_a) {
                    if (have_l) {
                        if (s->above_comp_ctx[col] && s->left_comp_ctx[row7]) {
                            c = 4;
                        } else if (s->above_comp_ctx[col]) {
                            c = 2 + (s->left_intra_ctx[row7] ||
                                     s->left_ref_ctx[row7] == s->fixcompref);
                        } else if (s->left_comp_ctx[row7]) {
                            c = 2 + (s->above_intra_ctx[col] ||
                                     s->above_ref_ctx[col] == s->fixcompref);
                        } else {
                            c = (!s->above_intra_ctx[col] &&
                                 s->above_ref_ctx[col] == s->fixcompref) ^
                                (!s->left_intra_ctx[row7] &&
                                 s->left_ref_ctx[row & 7] == s->fixcompref);
                        }
                    } else {
                        c = s->above_comp_ctx[col] ? 3 :
                            (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->fixcompref);
                    }
                } else if (have_l) {
                    c = s->left_comp_ctx[row7] ? 3 :
                        (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->fixcompref);
                } else {
                    c = 1;
                }
                b->comp = vp56_rac_get_prob(&s->c, s->prob.p.comp[c]);
                s->counts.comp[c][b->comp]++;
            }

            // read actual references
            // FIXME probably cache a few variables here to prevent repetitive
            // memory accesses below
            if (b->comp) /* two references */ {
                int fix_idx = s->signbias[s->fixcompref], var_idx = !fix_idx, c, bit;

                b->ref[fix_idx] = s->fixcompref;
                // FIXME can this codeblob be replaced by some sort of LUT?
                if (have_a) {
                    if (have_l) {
                        if (s->above_intra_ctx[col]) {
                            if (s->left_intra_ctx[row7]) {
                                c = 2;
                            } else {
                                c = 1 + 2 * (s->left_ref_ctx[row7] != s->varcompref[1]);
                            }
                        } else if (s->left_intra_ctx[row7]) {
                            c = 1 + 2 * (s->above_ref_ctx[col] != s->varcompref[1]);
                        } else {
                            int refl = s->left_ref_ctx[row7], refa = s->above_ref_ctx[col];

                            if (refl == refa && refa == s->varcompref[1]) {
                                c = 0;
                            } else if (!s->left_comp_ctx[row7] && !s->above_comp_ctx[col]) {
                                if ((refa == s->fixcompref && refl == s->varcompref[0]) ||
                                    (refl == s->fixcompref && refa == s->varcompref[0])) {
                                    c = 4;
                                } else {
                                    c = (refa == refl) ? 3 : 1;
                                }
                            } else if (!s->left_comp_ctx[row7]) {
                                if (refa == s->varcompref[1] && refl != s->varcompref[1]) {
                                    c = 1;
                                } else {
                                    c = (refl == s->varcompref[1] &&
                                         refa != s->varcompref[1]) ? 2 : 4;
                                }
                            } else if (!s->above_comp_ctx[col]) {
                                if (refl == s->varcompref[1] && refa != s->varcompref[1]) {
                                    c = 1;
                                } else {
                                    c = (refa == s->varcompref[1] &&
                                         refl != s->varcompref[1]) ? 2 : 4;
                                }
                            } else {
                                c = (refl == refa) ? 4 : 2;
                            }
                        }
                    } else {
                        if (s->above_intra_ctx[col]) {
                            c = 2;
                        } else if (s->above_comp_ctx[col]) {
                            c = 4 * (s->above_ref_ctx[col] != s->varcompref[1]);
                        } else {
                            c = 3 * (s->above_ref_ctx[col] != s->varcompref[1]);
                        }
                    }
                } else if (have_l) {
                    if (s->left_intra_ctx[row7]) {
                        c = 2;
                    } else if (s->left_comp_ctx[row7]) {
                        c = 4 * (s->left_ref_ctx[row7] != s->varcompref[1]);
                    } else {
                        c = 3 * (s->left_ref_ctx[row7] != s->varcompref[1]);
                    }
                } else {
                    c = 2;
                }
                bit = vp56_rac_get_prob(&s->c, s->prob.p.comp_ref[c]);
                b->ref[var_idx] = s->varcompref[bit];
                s->counts.comp_ref[c][bit]++;
            } else /* single reference */ {
                int bit, c;

                if (have_a && !s->above_intra_ctx[col]) {
                    if (have_l && !s->left_intra_ctx[row7]) {
                        if (s->left_comp_ctx[row7]) {
                            if (s->above_comp_ctx[col]) {
                                c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7] ||
                                         !s->above_ref_ctx[col]);
                            } else {
                                c = (3 * !s->above_ref_ctx[col]) +
                                    (!s->fixcompref || !s->left_ref_ctx[row7]);
                            }
                        } else if (s->above_comp_ctx[col]) {
                            c = (3 * !s->left_ref_ctx[row7]) +
                                (!s->fixcompref || !s->above_ref_ctx[col]);
                        } else {
                            c = 2 * !s->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col];
                        }
                    } else if (s->above_intra_ctx[col]) {
                        c = 2;
                    } else if (s->above_comp_ctx[col]) {
                        c = 1 + (!s->fixcompref || !s->above_ref_ctx[col]);
                    } else {
                        c = 4 * (!s->above_ref_ctx[col]);
                    }
                } else if (have_l && !s->left_intra_ctx[row7]) {
  1597. if (s->left_intra_ctx[row7]) {
  1598. c = 2;
  1599. } else if (s->left_comp_ctx[row7]) {
  1600. c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7]);
  1601. } else {
  1602. c = 4 * (!s->left_ref_ctx[row7]);
  1603. }
  1604. } else {
  1605. c = 2;
  1606. }
  1607. bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][0]);
  1608. s->counts.single_ref[c][0][bit]++;
  1609. if (!bit) {
  1610. b->ref[0] = 0;
  1611. } else {
  1612. // FIXME can this codeblob be replaced by some sort of LUT?
  1613. if (have_a) {
  1614. if (have_l) {
  1615. if (s->left_intra_ctx[row7]) {
  1616. if (s->above_intra_ctx[col]) {
  1617. c = 2;
  1618. } else if (s->above_comp_ctx[col]) {
  1619. c = 1 + 2 * (s->fixcompref == 1 ||
  1620. s->above_ref_ctx[col] == 1);
  1621. } else if (!s->above_ref_ctx[col]) {
  1622. c = 3;
  1623. } else {
  1624. c = 4 * (s->above_ref_ctx[col] == 1);
  1625. }
  1626. } else if (s->above_intra_ctx[col]) {
  1627. if (s->left_intra_ctx[row7]) {
  1628. c = 2;
  1629. } else if (s->left_comp_ctx[row7]) {
  1630. c = 1 + 2 * (s->fixcompref == 1 ||
  1631. s->left_ref_ctx[row7] == 1);
  1632. } else if (!s->left_ref_ctx[row7]) {
  1633. c = 3;
  1634. } else {
  1635. c = 4 * (s->left_ref_ctx[row7] == 1);
  1636. }
  1637. } else if (s->above_comp_ctx[col]) {
  1638. if (s->left_comp_ctx[row7]) {
  1639. if (s->left_ref_ctx[row7] == s->above_ref_ctx[col]) {
  1640. c = 3 * (s->fixcompref == 1 ||
  1641. s->left_ref_ctx[row7] == 1);
  1642. } else {
  1643. c = 2;
  1644. }
  1645. } else if (!s->left_ref_ctx[row7]) {
  1646. c = 1 + 2 * (s->fixcompref == 1 ||
  1647. s->above_ref_ctx[col] == 1);
  1648. } else {
  1649. c = 3 * (s->left_ref_ctx[row7] == 1) +
  1650. (s->fixcompref == 1 || s->above_ref_ctx[col] == 1);
  1651. }
  1652. } else if (s->left_comp_ctx[row7]) {
  1653. if (!s->above_ref_ctx[col]) {
  1654. c = 1 + 2 * (s->fixcompref == 1 ||
  1655. s->left_ref_ctx[row7] == 1);
  1656. } else {
  1657. c = 3 * (s->above_ref_ctx[col] == 1) +
  1658. (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1);
  1659. }
  1660. } else if (!s->above_ref_ctx[col]) {
  1661. if (!s->left_ref_ctx[row7]) {
  1662. c = 3;
  1663. } else {
  1664. c = 4 * (s->left_ref_ctx[row7] == 1);
  1665. }
  1666. } else if (!s->left_ref_ctx[row7]) {
  1667. c = 4 * (s->above_ref_ctx[col] == 1);
  1668. } else {
  1669. c = 2 * (s->left_ref_ctx[row7] == 1) +
  1670. 2 * (s->above_ref_ctx[col] == 1);
  1671. }
  1672. } else {
  1673. if (s->above_intra_ctx[col] ||
  1674. (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) {
  1675. c = 2;
  1676. } else if (s->above_comp_ctx[col]) {
  1677. c = 3 * (s->fixcompref == 1 || s->above_ref_ctx[col] == 1);
  1678. } else {
  1679. c = 4 * (s->above_ref_ctx[col] == 1);
  1680. }
  1681. }
  1682. } else if (have_l) {
  1683. if (s->left_intra_ctx[row7] ||
  1684. (!s->left_comp_ctx[row7] && !s->left_ref_ctx[row7])) {
  1685. c = 2;
  1686. } else if (s->left_comp_ctx[row7]) {
  1687. c = 3 * (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1);
  1688. } else {
  1689. c = 4 * (s->left_ref_ctx[row7] == 1);
  1690. }
  1691. } else {
  1692. c = 2;
  1693. }
  1694. bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][1]);
  1695. s->counts.single_ref[c][1][bit]++;
  1696. b->ref[0] = 1 + bit;
  1697. }
  1698. }
  1699. }
  1700. if (b->bs <= BS_8x8) {
  1701. if (s->segmentation.feat[b->seg_id].skip_enabled) {
  1702. b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV;
  1703. } else {
  1704. static const uint8_t off[10] = {
  1705. 3, 0, 0, 1, 0, 0, 0, 0, 0, 0
  1706. };
  1707. // FIXME this needs to use the LUT tables from find_ref_mvs
  1708. // because not all are -1,0/0,-1
  1709. int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]]
  1710. [s->left_mode_ctx[row7 + off[b->bs]]];
  1711. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1712. s->prob.p.mv_mode[c]);
  1713. b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
  1714. s->counts.mv_mode[c][b->mode[0] - 10]++;
  1715. }
  1716. }
  1717. if (s->filtermode == FILTER_SWITCHABLE) {
  1718. int c;
  1719. if (have_a && s->above_mode_ctx[col] >= NEARESTMV) {
  1720. if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
  1721. c = s->above_filter_ctx[col] == s->left_filter_ctx[row7] ?
  1722. s->left_filter_ctx[row7] : 3;
  1723. } else {
  1724. c = s->above_filter_ctx[col];
  1725. }
  1726. } else if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
  1727. c = s->left_filter_ctx[row7];
  1728. } else {
  1729. c = 3;
  1730. }
  1731. filter_id = vp8_rac_get_tree(&s->c, vp9_filter_tree,
  1732. s->prob.p.filter[c]);
  1733. s->counts.filter[c][filter_id]++;
  1734. b->filter = vp9_filter_lut[filter_id];
  1735. } else {
  1736. b->filter = s->filtermode;
  1737. }
  1738. if (b->bs > BS_8x8) {
  1739. int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][s->left_mode_ctx[row7]];
  1740. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1741. s->prob.p.mv_mode[c]);
  1742. s->counts.mv_mode[c][b->mode[0] - 10]++;
  1743. fill_mv(s, b->mv[0], b->mode[0], 0);
  1744. if (b->bs != BS_8x4) {
  1745. b->mode[1] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1746. s->prob.p.mv_mode[c]);
  1747. s->counts.mv_mode[c][b->mode[1] - 10]++;
  1748. fill_mv(s, b->mv[1], b->mode[1], 1);
  1749. } else {
  1750. b->mode[1] = b->mode[0];
  1751. AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
  1752. AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
  1753. }
  1754. if (b->bs != BS_4x8) {
  1755. b->mode[2] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1756. s->prob.p.mv_mode[c]);
  1757. s->counts.mv_mode[c][b->mode[2] - 10]++;
  1758. fill_mv(s, b->mv[2], b->mode[2], 2);
  1759. if (b->bs != BS_8x4) {
  1760. b->mode[3] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1761. s->prob.p.mv_mode[c]);
  1762. s->counts.mv_mode[c][b->mode[3] - 10]++;
  1763. fill_mv(s, b->mv[3], b->mode[3], 3);
  1764. } else {
  1765. b->mode[3] = b->mode[2];
  1766. AV_COPY32(&b->mv[3][0], &b->mv[2][0]);
  1767. AV_COPY32(&b->mv[3][1], &b->mv[2][1]);
  1768. }
  1769. } else {
  1770. b->mode[2] = b->mode[0];
  1771. AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
  1772. AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
  1773. b->mode[3] = b->mode[1];
  1774. AV_COPY32(&b->mv[3][0], &b->mv[1][0]);
  1775. AV_COPY32(&b->mv[3][1], &b->mv[1][1]);
  1776. }
  1777. } else {
  1778. fill_mv(s, b->mv[0], b->mode[0], -1);
  1779. AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
  1780. AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
  1781. AV_COPY32(&b->mv[3][0], &b->mv[0][0]);
  1782. AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
  1783. AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
  1784. AV_COPY32(&b->mv[3][1], &b->mv[0][1]);
  1785. }
  1786. vref = b->ref[b->comp ? s->signbias[s->varcompref[0]] : 0];
  1787. }
  1788. #if HAVE_FAST_64BIT
  1789. #define SPLAT_CTX(var, val, n) \
  1790. switch (n) { \
  1791. case 1: var = val; break; \
  1792. case 2: AV_WN16A(&var, val * 0x0101); break; \
  1793. case 4: AV_WN32A(&var, val * 0x01010101); break; \
  1794. case 8: AV_WN64A(&var, val * 0x0101010101010101ULL); break; \
  1795. case 16: { \
  1796. uint64_t v64 = val * 0x0101010101010101ULL; \
  1797. AV_WN64A( &var, v64); \
  1798. AV_WN64A(&((uint8_t *) &var)[8], v64); \
  1799. break; \
  1800. } \
  1801. }
  1802. #else
  1803. #define SPLAT_CTX(var, val, n) \
  1804. switch (n) { \
  1805. case 1: var = val; break; \
  1806. case 2: AV_WN16A(&var, val * 0x0101); break; \
  1807. case 4: AV_WN32A(&var, val * 0x01010101); break; \
  1808. case 8: { \
  1809. uint32_t v32 = val * 0x01010101; \
  1810. AV_WN32A( &var, v32); \
  1811. AV_WN32A(&((uint8_t *) &var)[4], v32); \
  1812. break; \
  1813. } \
  1814. case 16: { \
  1815. uint32_t v32 = val * 0x01010101; \
  1816. AV_WN32A( &var, v32); \
  1817. AV_WN32A(&((uint8_t *) &var)[4], v32); \
  1818. AV_WN32A(&((uint8_t *) &var)[8], v32); \
  1819. AV_WN32A(&((uint8_t *) &var)[12], v32); \
  1820. break; \
  1821. } \
  1822. }
  1823. #endif
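// The multiplications in SPLAT_CTX splat one context byte across n
// neighbouring entries with a single aligned store, e.g. for val = 3
// and n = 4, 3 * 0x01010101 = 0x03030303 writes four bytes of 3 at
// once; the non-fast-64bit variant splits the 8- and 16-byte cases
// into 32-bit stores.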
  1824. switch (bwh_tab[1][b->bs][0]) {
  1825. #define SET_CTXS(dir, off, n) \
  1826. do { \
  1827. SPLAT_CTX(s->dir##_skip_ctx[off], b->skip, n); \
  1828. SPLAT_CTX(s->dir##_txfm_ctx[off], b->tx, n); \
  1829. SPLAT_CTX(s->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
  1830. if (!s->keyframe && !s->intraonly) { \
  1831. SPLAT_CTX(s->dir##_intra_ctx[off], b->intra, n); \
  1832. SPLAT_CTX(s->dir##_comp_ctx[off], b->comp, n); \
  1833. SPLAT_CTX(s->dir##_mode_ctx[off], b->mode[3], n); \
  1834. if (!b->intra) { \
  1835. SPLAT_CTX(s->dir##_ref_ctx[off], vref, n); \
  1836. if (s->filtermode == FILTER_SWITCHABLE) { \
  1837. SPLAT_CTX(s->dir##_filter_ctx[off], filter_id, n); \
  1838. } \
  1839. } \
  1840. } \
  1841. } while (0)
  1842. case 1: SET_CTXS(above, col, 1); break;
  1843. case 2: SET_CTXS(above, col, 2); break;
  1844. case 4: SET_CTXS(above, col, 4); break;
  1845. case 8: SET_CTXS(above, col, 8); break;
  1846. }
  1847. switch (bwh_tab[1][b->bs][1]) {
  1848. case 1: SET_CTXS(left, row7, 1); break;
  1849. case 2: SET_CTXS(left, row7, 2); break;
  1850. case 4: SET_CTXS(left, row7, 4); break;
  1851. case 8: SET_CTXS(left, row7, 8); break;
  1852. }
  1853. #undef SPLAT_CTX
  1854. #undef SET_CTXS
  1855. if (!s->keyframe && !s->intraonly) {
  1856. if (b->bs > BS_8x8) {
  1857. int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
  1858. AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][0], &b->mv[1][0]);
  1859. AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][1], &b->mv[1][1]);
  1860. AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][0], mv0);
  1861. AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][1], mv1);
  1862. AV_COPY32(&s->above_mv_ctx[col * 2 + 0][0], &b->mv[2][0]);
  1863. AV_COPY32(&s->above_mv_ctx[col * 2 + 0][1], &b->mv[2][1]);
  1864. AV_WN32A(&s->above_mv_ctx[col * 2 + 1][0], mv0);
  1865. AV_WN32A(&s->above_mv_ctx[col * 2 + 1][1], mv1);
  1866. } else {
  1867. int n, mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
  1868. for (n = 0; n < w4 * 2; n++) {
  1869. AV_WN32A(&s->above_mv_ctx[col * 2 + n][0], mv0);
  1870. AV_WN32A(&s->above_mv_ctx[col * 2 + n][1], mv1);
  1871. }
  1872. for (n = 0; n < h4 * 2; n++) {
  1873. AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][0], mv0);
  1874. AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][1], mv1);
  1875. }
  1876. }
  1877. }
  1878. // FIXME kinda ugly
  1879. for (y = 0; y < h4; y++) {
  1880. int x, o = (row + y) * s->sb_cols * 8 + col;
  1881. struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[o];
  1882. if (b->intra) {
  1883. for (x = 0; x < w4; x++) {
  1884. mv[x].ref[0] =
  1885. mv[x].ref[1] = -1;
  1886. }
  1887. } else if (b->comp) {
  1888. for (x = 0; x < w4; x++) {
  1889. mv[x].ref[0] = b->ref[0];
  1890. mv[x].ref[1] = b->ref[1];
  1891. AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
  1892. AV_COPY32(&mv[x].mv[1], &b->mv[3][1]);
  1893. }
  1894. } else {
  1895. for (x = 0; x < w4; x++) {
  1896. mv[x].ref[0] = b->ref[0];
  1897. mv[x].ref[1] = -1;
  1898. AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
  1899. }
  1900. }
  1901. }
  1902. }
  1903. // FIXME merge cnt/eob arguments?
  1904. static av_always_inline int
  1905. decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
  1906. int is_tx32x32, unsigned (*cnt)[6][3],
  1907. unsigned (*eob)[6][2], uint8_t (*p)[6][11],
  1908. int nnz, const int16_t *scan, const int16_t (*nb)[2],
  1909. const int16_t *band_counts, const int16_t *qmul)
  1910. {
  1911. int i = 0, band = 0, band_left = band_counts[band];
  1912. uint8_t *tp = p[0][nnz];
  1913. uint8_t cache[1024];
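// Token decoding follows the VP9 coefficient scheme: per scan
// position, a "more coefficients follow" (EOB) flag, a zero flag,
// then the magnitude class. cache[] keeps a clamped magnitude (0-5)
// of each already-decoded position; the context for the next position
// is the rounded average of the two cached neighbours given by nb[],
// hence the "(1 + ... + ...) >> 1" below.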
  1914. do {
  1915. int val, rc;
  1916. val = vp56_rac_get_prob_branchy(c, tp[0]); // eob
  1917. eob[band][nnz][val]++;
  1918. if (!val)
  1919. break;
  1920. skip_eob:
  1921. if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
  1922. cnt[band][nnz][0]++;
  1923. if (!--band_left)
  1924. band_left = band_counts[++band];
  1925. cache[scan[i]] = 0;
  1926. nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
  1927. tp = p[band][nnz];
  1928. if (++i == n_coeffs)
1929. break; // invalid input; blocks should end with EOB
  1930. goto skip_eob;
  1931. }
  1932. rc = scan[i];
  1933. if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one
  1934. cnt[band][nnz][1]++;
  1935. val = 1;
  1936. cache[rc] = 1;
  1937. } else {
  1938. // fill in p[3-10] (model fill) - only once per frame for each pos
  1939. if (!tp[3])
  1940. memcpy(&tp[3], vp9_model_pareto8[tp[2]], 8);
  1941. cnt[band][nnz][2]++;
  1942. if (!vp56_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
  1943. if (!vp56_rac_get_prob_branchy(c, tp[4])) {
  1944. cache[rc] = val = 2;
  1945. } else {
  1946. val = 3 + vp56_rac_get_prob(c, tp[5]);
  1947. cache[rc] = 3;
  1948. }
  1949. } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
  1950. cache[rc] = 4;
  1951. if (!vp56_rac_get_prob_branchy(c, tp[7])) {
  1952. val = 5 + vp56_rac_get_prob(c, 159);
  1953. } else {
  1954. val = 7 + (vp56_rac_get_prob(c, 165) << 1);
  1955. val += vp56_rac_get_prob(c, 145);
  1956. }
  1957. } else { // cat 3-6
  1958. cache[rc] = 5;
  1959. if (!vp56_rac_get_prob_branchy(c, tp[8])) {
  1960. if (!vp56_rac_get_prob_branchy(c, tp[9])) {
  1961. val = 11 + (vp56_rac_get_prob(c, 173) << 2);
  1962. val += (vp56_rac_get_prob(c, 148) << 1);
  1963. val += vp56_rac_get_prob(c, 140);
  1964. } else {
  1965. val = 19 + (vp56_rac_get_prob(c, 176) << 3);
  1966. val += (vp56_rac_get_prob(c, 155) << 2);
  1967. val += (vp56_rac_get_prob(c, 140) << 1);
  1968. val += vp56_rac_get_prob(c, 135);
  1969. }
  1970. } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
  1971. val = 35 + (vp56_rac_get_prob(c, 180) << 4);
  1972. val += (vp56_rac_get_prob(c, 157) << 3);
  1973. val += (vp56_rac_get_prob(c, 141) << 2);
  1974. val += (vp56_rac_get_prob(c, 134) << 1);
  1975. val += vp56_rac_get_prob(c, 130);
  1976. } else {
  1977. val = 67 + (vp56_rac_get_prob(c, 254) << 13);
  1978. val += (vp56_rac_get_prob(c, 254) << 12);
  1979. val += (vp56_rac_get_prob(c, 254) << 11);
  1980. val += (vp56_rac_get_prob(c, 252) << 10);
  1981. val += (vp56_rac_get_prob(c, 249) << 9);
  1982. val += (vp56_rac_get_prob(c, 243) << 8);
  1983. val += (vp56_rac_get_prob(c, 230) << 7);
  1984. val += (vp56_rac_get_prob(c, 196) << 6);
  1985. val += (vp56_rac_get_prob(c, 177) << 5);
  1986. val += (vp56_rac_get_prob(c, 153) << 4);
  1987. val += (vp56_rac_get_prob(c, 140) << 3);
  1988. val += (vp56_rac_get_prob(c, 133) << 2);
  1989. val += (vp56_rac_get_prob(c, 130) << 1);
  1990. val += vp56_rac_get_prob(c, 129);
  1991. }
  1992. }
  1993. }
  1994. if (!--band_left)
  1995. band_left = band_counts[++band];
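// a raw sign bit precedes dequantization; qmul[0] is the DC quantizer
// and qmul[1] the AC one, hence the !!i: only the first coefficient
// in scan order uses the DC value. The 32x32 product is halved to
// compensate for that transform's larger scaling.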
  1996. if (is_tx32x32)
  1997. coef[rc] = ((vp8_rac_get(c) ? -val : val) * qmul[!!i]) / 2;
  1998. else
  1999. coef[rc] = (vp8_rac_get(c) ? -val : val) * qmul[!!i];
  2000. nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
  2001. tp = p[band][nnz];
  2002. } while (++i < n_coeffs);
  2003. return i;
  2004. }
  2005. static int decode_coeffs_b(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
  2006. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  2007. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  2008. const int16_t (*nb)[2], const int16_t *band_counts,
  2009. const int16_t *qmul)
  2010. {
  2011. return decode_coeffs_b_generic(c, coef, n_coeffs, 0, cnt, eob, p,
  2012. nnz, scan, nb, band_counts, qmul);
  2013. }
  2014. static int decode_coeffs_b32(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
  2015. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  2016. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  2017. const int16_t (*nb)[2], const int16_t *band_counts,
  2018. const int16_t *qmul)
  2019. {
  2020. return decode_coeffs_b_generic(c, coef, n_coeffs, 1, cnt, eob, p,
  2021. nnz, scan, nb, band_counts, qmul);
  2022. }
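// These thin wrappers exist so the av_always_inline generic above is
// instantiated with is_tx32x32 as a compile-time constant, letting
// the compiler drop the dead dequantization branch in each copy.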
  2023. static void decode_coeffs(AVCodecContext *ctx)
  2024. {
  2025. VP9Context *s = ctx->priv_data;
  2026. VP9Block *b = s->b;
  2027. int row = s->row, col = s->col;
  2028. uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
  2029. unsigned (*c)[6][3] = s->counts.coef[b->tx][0 /* y */][!b->intra];
  2030. unsigned (*e)[6][2] = s->counts.eob[b->tx][0 /* y */][!b->intra];
  2031. int w4 = bwh_tab[1][b->bs][0] << 1, h4 = bwh_tab[1][b->bs][1] << 1;
  2032. int end_x = FFMIN(2 * (s->cols - col), w4);
  2033. int end_y = FFMIN(2 * (s->rows - row), h4);
  2034. int n, pl, x, y, res;
  2035. int16_t (*qmul)[2] = s->segmentation.feat[b->seg_id].qmul;
  2036. int tx = 4 * s->lossless + b->tx;
  2037. const int16_t * const *yscans = vp9_scans[tx];
  2038. const int16_t (* const *ynbs)[2] = vp9_scans_nb[tx];
  2039. const int16_t *uvscan = vp9_scans[b->uvtx][DCT_DCT];
  2040. const int16_t (*uvnb)[2] = vp9_scans_nb[b->uvtx][DCT_DCT];
  2041. uint8_t *a = &s->above_y_nnz_ctx[col * 2];
  2042. uint8_t *l = &s->left_y_nnz_ctx[(row & 7) << 1];
  2043. static const int16_t band_counts[4][8] = {
  2044. { 1, 2, 3, 4, 3, 16 - 13 },
  2045. { 1, 2, 3, 4, 11, 64 - 21 },
  2046. { 1, 2, 3, 4, 11, 256 - 21 },
  2047. { 1, 2, 3, 4, 11, 1024 - 21 },
  2048. };
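// Each row lists how many scan positions fall into each of the 6
// coefficient bands for one tx size, e.g. TX_4X4: 1+2+3+4+3+3 = 16.
// band_left counts down within the current band; when it hits zero,
// the decoder advances to the next band's probabilities and counts.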
  2049. const int16_t *y_band_counts = band_counts[b->tx];
  2050. const int16_t *uv_band_counts = band_counts[b->uvtx];
  2051. #define MERGE(la, end, step, rd) \
  2052. for (n = 0; n < end; n += step) \
  2053. la[n] = !!rd(&la[n])
  2054. #define MERGE_CTX(step, rd) \
  2055. do { \
  2056. MERGE(l, end_y, step, rd); \
  2057. MERGE(a, end_x, step, rd); \
  2058. } while (0)
  2059. #define DECODE_Y_COEF_LOOP(step, mode_index, v) \
  2060. for (n = 0, y = 0; y < end_y; y += step) { \
  2061. for (x = 0; x < end_x; x += step, n += step * step) { \
  2062. enum TxfmType txtp = vp9_intra_txfm_type[b->mode[mode_index]]; \
  2063. res = decode_coeffs_b##v(&s->c, s->block + 16 * n, 16 * step * step, \
  2064. c, e, p, a[x] + l[y], yscans[txtp], \
  2065. ynbs[txtp], y_band_counts, qmul[0]); \
  2066. a[x] = l[y] = !!res; \
  2067. if (step >= 4) { \
  2068. AV_WN16A(&s->eob[n], res); \
  2069. } else { \
  2070. s->eob[n] = res; \
  2071. } \
  2072. } \
  2073. }
  2074. #define SPLAT(la, end, step, cond) \
  2075. if (step == 2) { \
  2076. for (n = 1; n < end; n += step) \
  2077. la[n] = la[n - 1]; \
  2078. } else if (step == 4) { \
  2079. if (cond) { \
  2080. for (n = 0; n < end; n += step) \
  2081. AV_WN32A(&la[n], la[n] * 0x01010101); \
  2082. } else { \
  2083. for (n = 0; n < end; n += step) \
  2084. memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
  2085. } \
  2086. } else /* step == 8 */ { \
  2087. if (cond) { \
  2088. if (HAVE_FAST_64BIT) { \
  2089. for (n = 0; n < end; n += step) \
  2090. AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
  2091. } else { \
  2092. for (n = 0; n < end; n += step) { \
  2093. uint32_t v32 = la[n] * 0x01010101; \
  2094. AV_WN32A(&la[n], v32); \
  2095. AV_WN32A(&la[n + 4], v32); \
  2096. } \
  2097. } \
  2098. } else { \
  2099. for (n = 0; n < end; n += step) \
  2100. memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
  2101. } \
  2102. }
  2103. #define SPLAT_CTX(step) \
  2104. do { \
  2105. SPLAT(a, end_x, step, end_x == w4); \
  2106. SPLAT(l, end_y, step, end_y == h4); \
  2107. } while (0)
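// For tx sizes above 4x4, one transform block spans several 4x4
// units: the per-unit nonzero contexts are first merged down to one
// value per transform block (MERGE_CTX) and, after decoding, the
// resulting context is splatted back out to every unit the block
// covers (SPLAT_CTX).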
  2108. /* y tokens */
  2109. switch (b->tx) {
  2110. case TX_4X4:
  2111. DECODE_Y_COEF_LOOP(1, b->bs > BS_8x8 ? n : 0,);
  2112. break;
  2113. case TX_8X8:
  2114. MERGE_CTX(2, AV_RN16A);
  2115. DECODE_Y_COEF_LOOP(2, 0,);
  2116. SPLAT_CTX(2);
  2117. break;
  2118. case TX_16X16:
  2119. MERGE_CTX(4, AV_RN32A);
  2120. DECODE_Y_COEF_LOOP(4, 0,);
  2121. SPLAT_CTX(4);
  2122. break;
  2123. case TX_32X32:
  2124. MERGE_CTX(8, AV_RN64A);
  2125. DECODE_Y_COEF_LOOP(8, 0, 32);
  2126. SPLAT_CTX(8);
  2127. break;
  2128. }
  2129. #define DECODE_UV_COEF_LOOP(step) \
  2130. for (n = 0, y = 0; y < end_y; y += step) { \
  2131. for (x = 0; x < end_x; x += step, n += step * step) { \
  2132. res = decode_coeffs_b(&s->c, s->uvblock[pl] + 16 * n, \
  2133. 16 * step * step, c, e, p, a[x] + l[y], \
  2134. uvscan, uvnb, uv_band_counts, qmul[1]); \
  2135. a[x] = l[y] = !!res; \
  2136. if (step >= 4) { \
  2137. AV_WN16A(&s->uveob[pl][n], res); \
  2138. } else { \
  2139. s->uveob[pl][n] = res; \
  2140. } \
  2141. } \
  2142. }
  2143. p = s->prob.coef[b->uvtx][1 /* uv */][!b->intra];
  2144. c = s->counts.coef[b->uvtx][1 /* uv */][!b->intra];
  2145. e = s->counts.eob[b->uvtx][1 /* uv */][!b->intra];
  2146. w4 >>= 1;
  2147. h4 >>= 1;
  2148. end_x >>= 1;
  2149. end_y >>= 1;
  2150. for (pl = 0; pl < 2; pl++) {
  2151. a = &s->above_uv_nnz_ctx[pl][col];
  2152. l = &s->left_uv_nnz_ctx[pl][row & 7];
  2153. switch (b->uvtx) {
  2154. case TX_4X4:
  2155. DECODE_UV_COEF_LOOP(1);
  2156. break;
  2157. case TX_8X8:
  2158. MERGE_CTX(2, AV_RN16A);
  2159. DECODE_UV_COEF_LOOP(2);
  2160. SPLAT_CTX(2);
  2161. break;
  2162. case TX_16X16:
  2163. MERGE_CTX(4, AV_RN32A);
  2164. DECODE_UV_COEF_LOOP(4);
  2165. SPLAT_CTX(4);
  2166. break;
  2167. case TX_32X32:
  2168. MERGE_CTX(8, AV_RN64A);
2169. // a 64x64 (max) superblock can only ever contain a single 32x32 uv
2170. // transform block, so there is no need to loop
  2171. res = decode_coeffs_b32(&s->c, s->uvblock[pl],
  2172. 1024, c, e, p, a[0] + l[0],
  2173. uvscan, uvnb, uv_band_counts, qmul[1]);
  2174. a[0] = l[0] = !!res;
  2175. AV_WN16A(&s->uveob[pl][0], res);
  2176. SPLAT_CTX(8);
  2177. break;
  2178. }
  2179. }
  2180. }
  2181. static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **a,
  2182. uint8_t *dst_edge, ptrdiff_t stride_edge,
  2183. uint8_t *dst_inner, ptrdiff_t stride_inner,
  2184. uint8_t *l, int col, int x, int w,
  2185. int row, int y, enum TxfmMode tx,
  2186. int p)
  2187. {
  2188. int have_top = row > 0 || y > 0;
  2189. int have_left = col > s->tiling.tile_col_start || x > 0;
  2190. int have_right = x < w - 1;
  2191. static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
  2192. [VERT_PRED] = { { DC_127_PRED, VERT_PRED },
  2193. { DC_127_PRED, VERT_PRED } },
  2194. [HOR_PRED] = { { DC_129_PRED, DC_129_PRED },
  2195. { HOR_PRED, HOR_PRED } },
  2196. [DC_PRED] = { { DC_128_PRED, TOP_DC_PRED },
  2197. { LEFT_DC_PRED, DC_PRED } },
  2198. [DIAG_DOWN_LEFT_PRED] = { { DC_127_PRED, DIAG_DOWN_LEFT_PRED },
  2199. { DC_127_PRED, DIAG_DOWN_LEFT_PRED } },
  2200. [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
  2201. { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
  2202. [VERT_RIGHT_PRED] = { { VERT_RIGHT_PRED, VERT_RIGHT_PRED },
  2203. { VERT_RIGHT_PRED, VERT_RIGHT_PRED } },
  2204. [HOR_DOWN_PRED] = { { HOR_DOWN_PRED, HOR_DOWN_PRED },
  2205. { HOR_DOWN_PRED, HOR_DOWN_PRED } },
  2206. [VERT_LEFT_PRED] = { { DC_127_PRED, VERT_LEFT_PRED },
  2207. { DC_127_PRED, VERT_LEFT_PRED } },
  2208. [HOR_UP_PRED] = { { DC_129_PRED, DC_129_PRED },
  2209. { HOR_UP_PRED, HOR_UP_PRED } },
  2210. [TM_VP8_PRED] = { { DC_129_PRED, VERT_PRED },
  2211. { HOR_PRED, TM_VP8_PRED } },
  2212. };
  2213. static const struct {
  2214. uint8_t needs_left:1;
  2215. uint8_t needs_top:1;
  2216. uint8_t needs_topleft:1;
  2217. uint8_t needs_topright:1;
  2218. } edges[N_INTRA_PRED_MODES] = {
  2219. [VERT_PRED] = { .needs_top = 1 },
  2220. [HOR_PRED] = { .needs_left = 1 },
  2221. [DC_PRED] = { .needs_top = 1, .needs_left = 1 },
  2222. [DIAG_DOWN_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
  2223. [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2224. [VERT_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2225. [HOR_DOWN_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2226. [VERT_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
  2227. [HOR_UP_PRED] = { .needs_left = 1 },
  2228. [TM_VP8_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2229. [LEFT_DC_PRED] = { .needs_left = 1 },
  2230. [TOP_DC_PRED] = { .needs_top = 1 },
  2231. [DC_128_PRED] = { 0 },
  2232. [DC_127_PRED] = { 0 },
  2233. [DC_129_PRED] = { 0 }
  2234. };
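// mode_conv[] rewrites a prediction mode into a degenerate variant
// when required neighbours are missing (e.g. VERT_PRED without a top
// row becomes DC_127_PRED), and edges[] lists which edge pixels the
// final mode actually reads, so the code below only fixes up
// (extends, or fills with 127/129) the edges that are really needed.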
  2235. av_assert2(mode >= 0 && mode < 10);
  2236. mode = mode_conv[mode][have_left][have_top];
  2237. if (edges[mode].needs_top) {
  2238. uint8_t *top, *topleft;
  2239. int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !p) - x) * 4;
  2240. int n_px_need_tr = 0;
  2241. if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
  2242. n_px_need_tr = 4;
  2243. // if top of sb64-row, use s->intra_pred_data[] instead of
  2244. // dst[-stride] for intra prediction (it contains pre- instead of
  2245. // post-loopfilter data)
  2246. if (have_top) {
  2247. top = !(row & 7) && !y ?
  2248. s->intra_pred_data[p] + col * (8 >> !!p) + x * 4 :
  2249. y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
  2250. if (have_left)
  2251. topleft = !(row & 7) && !y ?
  2252. s->intra_pred_data[p] + col * (8 >> !!p) + x * 4 :
  2253. y == 0 || x == 0 ? &dst_edge[-stride_edge] :
  2254. &dst_inner[-stride_inner];
  2255. }
  2256. if (have_top &&
  2257. (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
  2258. (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
  2259. n_px_need + n_px_need_tr <= n_px_have) {
  2260. *a = top;
  2261. } else {
  2262. if (have_top) {
  2263. if (n_px_need <= n_px_have) {
  2264. memcpy(*a, top, n_px_need);
  2265. } else {
  2266. memcpy(*a, top, n_px_have);
  2267. memset(&(*a)[n_px_have], (*a)[n_px_have - 1],
  2268. n_px_need - n_px_have);
  2269. }
  2270. } else {
  2271. memset(*a, 127, n_px_need);
  2272. }
  2273. if (edges[mode].needs_topleft) {
  2274. if (have_left && have_top) {
  2275. (*a)[-1] = topleft[-1];
  2276. } else {
  2277. (*a)[-1] = have_top ? 129 : 127;
  2278. }
  2279. }
  2280. if (tx == TX_4X4 && edges[mode].needs_topright) {
  2281. if (have_top && have_right &&
  2282. n_px_need + n_px_need_tr <= n_px_have) {
  2283. memcpy(&(*a)[4], &top[4], 4);
  2284. } else {
  2285. memset(&(*a)[4], (*a)[3], 4);
  2286. }
  2287. }
  2288. }
  2289. }
  2290. if (edges[mode].needs_left) {
  2291. if (have_left) {
  2292. int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !p) - y) * 4;
  2293. uint8_t *dst = x == 0 ? dst_edge : dst_inner;
  2294. ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;
  2295. if (n_px_need <= n_px_have) {
  2296. for (i = 0; i < n_px_need; i++)
  2297. l[n_px_need - 1 - i] = dst[i * stride - 1];
  2298. } else {
  2299. for (i = 0; i < n_px_have; i++)
  2300. l[n_px_need - 1 - i] = dst[i * stride - 1];
  2301. memset(l, l[n_px_need - n_px_have], n_px_need - n_px_have);
  2302. }
  2303. } else {
  2304. memset(l, 129, 4 << tx);
  2305. }
  2306. }
  2307. return mode;
  2308. }
  2309. static void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, ptrdiff_t uv_off)
  2310. {
  2311. VP9Context *s = ctx->priv_data;
  2312. VP9Block *b = s->b;
  2313. int row = s->row, col = s->col;
  2314. int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
  2315. int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
  2316. int end_x = FFMIN(2 * (s->cols - col), w4);
  2317. int end_y = FFMIN(2 * (s->rows - row), h4);
  2318. int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless;
  2319. int uvstep1d = 1 << b->uvtx, p;
  2320. uint8_t *dst = s->dst[0], *dst_r = s->frames[CUR_FRAME].tf.f->data[0] + y_off;
  2321. LOCAL_ALIGNED_32(uint8_t, a_buf, [64]);
  2322. LOCAL_ALIGNED_32(uint8_t, l, [32]);
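// a_buf/l are scratch edge buffers: check_intra_mode() either points
// a directly at the row above (fast path) or copies/extends the
// needed top(+topright) pixels into a_buf; a starts 32 bytes in so
// that a[-1], the top-left pixel, stays addressable. l gets the left
// column stored bottom-to-top, the layout consumed by the intra_pred
// dsp functions above.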
  2323. for (n = 0, y = 0; y < end_y; y += step1d) {
  2324. uint8_t *ptr = dst, *ptr_r = dst_r;
  2325. for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d,
  2326. ptr_r += 4 * step1d, n += step) {
  2327. int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
  2328. y * 2 + x : 0];
  2329. uint8_t *a = &a_buf[32];
  2330. enum TxfmType txtp = vp9_intra_txfm_type[mode];
  2331. int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
  2332. mode = check_intra_mode(s, mode, &a, ptr_r,
  2333. s->frames[CUR_FRAME].tf.f->linesize[0],
  2334. ptr, s->y_stride, l,
  2335. col, x, w4, row, y, b->tx, 0);
  2336. s->dsp.intra_pred[b->tx][mode](ptr, s->y_stride, l, a);
  2337. if (eob)
  2338. s->dsp.itxfm_add[tx][txtp](ptr, s->y_stride,
  2339. s->block + 16 * n, eob);
  2340. }
  2341. dst_r += 4 * step1d * s->frames[CUR_FRAME].tf.f->linesize[0];
  2342. dst += 4 * step1d * s->y_stride;
  2343. }
  2344. // U/V
  2345. w4 >>= 1;
  2346. end_x >>= 1;
  2347. end_y >>= 1;
  2348. step = 1 << (b->uvtx * 2);
  2349. for (p = 0; p < 2; p++) {
  2350. dst = s->dst[1 + p];
  2351. dst_r = s->frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
  2352. for (n = 0, y = 0; y < end_y; y += uvstep1d) {
  2353. uint8_t *ptr = dst, *ptr_r = dst_r;
  2354. for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d,
  2355. ptr_r += 4 * uvstep1d, n += step) {
  2356. int mode = b->uvmode;
  2357. uint8_t *a = &a_buf[32];
  2358. int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
  2359. mode = check_intra_mode(s, mode, &a, ptr_r,
  2360. s->frames[CUR_FRAME].tf.f->linesize[1],
  2361. ptr, s->uv_stride, l,
  2362. col, x, w4, row, y, b->uvtx, p + 1);
  2363. s->dsp.intra_pred[b->uvtx][mode](ptr, s->uv_stride, l, a);
  2364. if (eob)
  2365. s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
  2366. s->uvblock[p] + 16 * n, eob);
  2367. }
  2368. dst_r += 4 * uvstep1d * s->frames[CUR_FRAME].tf.f->linesize[1];
  2369. dst += 4 * uvstep1d * s->uv_stride;
  2370. }
  2371. }
  2372. }
  2373. static av_always_inline void mc_luma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
  2374. uint8_t *dst, ptrdiff_t dst_stride,
  2375. const uint8_t *ref, ptrdiff_t ref_stride,
  2376. ThreadFrame *ref_frame,
  2377. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2378. int bw, int bh, int w, int h,
  2379. const uint16_t *scale, const uint8_t *step)
  2380. {
  2381. #define scale_mv(n, dim) (((int64_t)n * scale[dim]) >> 14)
  2382. // BUG libvpx seems to scale the two components separately. This introduces
  2383. // rounding errors but we have to reproduce them to be exactly compatible
  2384. // with the output from libvpx...
  2385. int mx = scale_mv(mv->x * 2, 0) + scale_mv(x * 16, 0);
  2386. int my = scale_mv(mv->y * 2, 1) + scale_mv(y * 16, 1);
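// scale[] presumably holds the ratio between reference and current
// frame dimensions in 1.14 fixed point (hence the >> 14 in scale_mv),
// so mx/my end up in 1/16-pel units on the reference frame's grid.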
  2387. int refbw_m1, refbh_m1;
  2388. int th;
  2389. y = my >> 4;
  2390. x = mx >> 4;
  2391. ref += y * ref_stride + x;
  2392. mx &= 15;
  2393. my &= 15;
  2394. refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
  2395. refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
  2396. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2397. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2398. // the longest loopfilter of the next sbrow
  2399. th = (y + refbh_m1 + 4 + 7) >> 6;
  2400. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2401. if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 4 >= h - refbh_m1) {
  2402. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2403. ref - 3 * ref_stride - 3,
  2404. 144, ref_stride,
  2405. refbw_m1 + 8, refbh_m1 + 8,
  2406. x - 3, y - 3, w, h);
  2407. ref = s->edge_emu_buffer + 3 * 144 + 3;
  2408. ref_stride = 144;
  2409. }
  2410. smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
  2411. }
  2412. static av_always_inline void mc_chroma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
  2413. uint8_t *dst_u, uint8_t *dst_v,
  2414. ptrdiff_t dst_stride,
  2415. const uint8_t *ref_u, ptrdiff_t src_stride_u,
  2416. const uint8_t *ref_v, ptrdiff_t src_stride_v,
  2417. ThreadFrame *ref_frame,
  2418. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2419. int bw, int bh, int w, int h,
  2420. const uint16_t *scale, const uint8_t *step)
  2421. {
  2422. // BUG https://code.google.com/p/webm/issues/detail?id=820
  2423. int mx = scale_mv(mv->x, 0) + (scale_mv(x * 16, 0) & ~15) + (scale_mv(x * 32, 0) & 15);
  2424. int my = scale_mv(mv->y, 1) + (scale_mv(y * 16, 1) & ~15) + (scale_mv(y * 32, 1) & 15);
  2425. #undef scale_mv
  2426. int refbw_m1, refbh_m1;
  2427. int th;
  2428. y = my >> 4;
  2429. x = mx >> 4;
  2430. ref_u += y * src_stride_u + x;
  2431. ref_v += y * src_stride_v + x;
  2432. mx &= 15;
  2433. my &= 15;
  2434. refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
  2435. refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
  2436. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2437. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2438. // the longest loopfilter of the next sbrow
  2439. th = (y + refbh_m1 + 4 + 7) >> 5;
  2440. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2441. if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 4 >= h - refbh_m1) {
  2442. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2443. ref_u - 3 * src_stride_u - 3,
  2444. 144, src_stride_u,
  2445. refbw_m1 + 8, refbh_m1 + 8,
  2446. x - 3, y - 3, w, h);
  2447. ref_u = s->edge_emu_buffer + 3 * 144 + 3;
  2448. smc(dst_u, dst_stride, ref_u, 144, bh, mx, my, step[0], step[1]);
  2449. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2450. ref_v - 3 * src_stride_v - 3,
  2451. 144, src_stride_v,
  2452. refbw_m1 + 8, refbh_m1 + 8,
  2453. x - 3, y - 3, w, h);
  2454. ref_v = s->edge_emu_buffer + 3 * 144 + 3;
  2455. smc(dst_v, dst_stride, ref_v, 144, bh, mx, my, step[0], step[1]);
  2456. } else {
  2457. smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
  2458. smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
  2459. }
  2460. }
  2461. #define FN(x) x##_scaled
  2462. #define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, bw, bh, w, h, i) \
  2463. mc_luma_scaled(s, s->dsp.s##mc, dst, dst_ls, src, src_ls, tref, row, col, \
  2464. mv, bw, bh, w, h, s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
  2465. #define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2466. row, col, mv, bw, bh, w, h, i) \
  2467. mc_chroma_scaled(s, s->dsp.s##mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2468. row, col, mv, bw, bh, w, h, s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
  2469. #include "vp9_mc_template.c"
  2470. #undef mc_luma_dir
  2471. #undef mc_chroma_dir
  2472. #undef FN
  2473. static av_always_inline void mc_luma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
  2474. uint8_t *dst, ptrdiff_t dst_stride,
  2475. const uint8_t *ref, ptrdiff_t ref_stride,
  2476. ThreadFrame *ref_frame,
  2477. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2478. int bw, int bh, int w, int h)
  2479. {
  2480. int mx = mv->x, my = mv->y, th;
  2481. y += my >> 3;
  2482. x += mx >> 3;
  2483. ref += y * ref_stride + x;
  2484. mx &= 7;
  2485. my &= 7;
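// mx/my now hold the 1/8-pel phase; mc[!!mx][!!my] selects between
// the copy, h-only, v-only and hv filter variants, and the "mx << 1"
// at the call site below converts the phase to the sixteenth-pel
// index the mc functions take (chroma passes its 4-bit phase
// directly).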
  2486. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2487. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2488. // the longest loopfilter of the next sbrow
  2489. th = (y + bh + 4 * !!my + 7) >> 6;
  2490. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2491. if (x < !!mx * 3 || y < !!my * 3 ||
  2492. x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
  2493. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2494. ref - !!my * 3 * ref_stride - !!mx * 3,
  2495. 80, ref_stride,
  2496. bw + !!mx * 7, bh + !!my * 7,
  2497. x - !!mx * 3, y - !!my * 3, w, h);
  2498. ref = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
  2499. ref_stride = 80;
  2500. }
  2501. mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
  2502. }
  2503. static av_always_inline void mc_chroma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
  2504. uint8_t *dst_u, uint8_t *dst_v,
  2505. ptrdiff_t dst_stride,
  2506. const uint8_t *ref_u, ptrdiff_t src_stride_u,
  2507. const uint8_t *ref_v, ptrdiff_t src_stride_v,
  2508. ThreadFrame *ref_frame,
  2509. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2510. int bw, int bh, int w, int h)
  2511. {
  2512. int mx = mv->x, my = mv->y, th;
  2513. y += my >> 4;
  2514. x += mx >> 4;
  2515. ref_u += y * src_stride_u + x;
  2516. ref_v += y * src_stride_v + x;
  2517. mx &= 15;
  2518. my &= 15;
  2519. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2520. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2521. // the longest loopfilter of the next sbrow
  2522. th = (y + bh + 4 * !!my + 7) >> 5;
  2523. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2524. if (x < !!mx * 3 || y < !!my * 3 ||
  2525. x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
  2526. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2527. ref_u - !!my * 3 * src_stride_u - !!mx * 3,
  2528. 80, src_stride_u,
  2529. bw + !!mx * 7, bh + !!my * 7,
  2530. x - !!mx * 3, y - !!my * 3, w, h);
  2531. ref_u = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
  2532. mc[!!mx][!!my](dst_u, dst_stride, ref_u, 80, bh, mx, my);
  2533. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2534. ref_v - !!my * 3 * src_stride_v - !!mx * 3,
  2535. 80, src_stride_v,
  2536. bw + !!mx * 7, bh + !!my * 7,
  2537. x - !!mx * 3, y - !!my * 3, w, h);
  2538. ref_v = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
  2539. mc[!!mx][!!my](dst_v, dst_stride, ref_v, 80, bh, mx, my);
  2540. } else {
  2541. mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
  2542. mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
  2543. }
  2544. }
  2545. #define FN(x) x
  2546. #define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, bw, bh, w, h, i) \
  2547. mc_luma_unscaled(s, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
  2548. mv, bw, bh, w, h)
  2549. #define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2550. row, col, mv, bw, bh, w, h, i) \
  2551. mc_chroma_unscaled(s, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2552. row, col, mv, bw, bh, w, h)
  2553. #include "vp9_mc_template.c"
2554. #undef mc_luma_dir
2555. #undef mc_chroma_dir
  2556. #undef FN
  2557. static void inter_recon(AVCodecContext *ctx)
  2558. {
  2559. VP9Context *s = ctx->priv_data;
  2560. VP9Block *b = s->b;
  2561. int row = s->row, col = s->col;
  2562. if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
  2563. inter_pred_scaled(ctx);
  2564. } else {
  2565. inter_pred(ctx);
  2566. }
  2567. if (!b->skip) {
  2568. /* mostly copied intra_recon() */
  2569. int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
  2570. int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
  2571. int end_x = FFMIN(2 * (s->cols - col), w4);
  2572. int end_y = FFMIN(2 * (s->rows - row), h4);
  2573. int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless;
  2574. int uvstep1d = 1 << b->uvtx, p;
  2575. uint8_t *dst = s->dst[0];
  2576. // y itxfm add
  2577. for (n = 0, y = 0; y < end_y; y += step1d) {
  2578. uint8_t *ptr = dst;
  2579. for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d, n += step) {
  2580. int eob = b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
  2581. if (eob)
  2582. s->dsp.itxfm_add[tx][DCT_DCT](ptr, s->y_stride,
  2583. s->block + 16 * n, eob);
  2584. }
  2585. dst += 4 * s->y_stride * step1d;
  2586. }
  2587. // uv itxfm add
  2588. end_x >>= 1;
  2589. end_y >>= 1;
  2590. step = 1 << (b->uvtx * 2);
  2591. for (p = 0; p < 2; p++) {
  2592. dst = s->dst[p + 1];
  2593. for (n = 0, y = 0; y < end_y; y += uvstep1d) {
  2594. uint8_t *ptr = dst;
  2595. for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d, n += step) {
  2596. int eob = b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
  2597. if (eob)
  2598. s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
  2599. s->uvblock[p] + 16 * n, eob);
  2600. }
  2601. dst += 4 * uvstep1d * s->uv_stride;
  2602. }
  2603. }
  2604. }
  2605. }
  2606. static av_always_inline void mask_edges(struct VP9Filter *lflvl, int is_uv,
  2607. int row_and_7, int col_and_7,
  2608. int w, int h, int col_end, int row_end,
  2609. enum TxfmMode tx, int skip_inter)
  2610. {
  2611. // FIXME I'm pretty sure all loops can be replaced by a single LUT if
  2612. // we make VP9Filter.mask uint64_t (i.e. row/col all single variable)
  2613. // and make the LUT 5-indexed (bl, bp, is_uv, tx and row/col), and then
  2614. // use row_and_7/col_and_7 as shifts (1*col_and_7+8*row_and_7)
  2615. // the intended behaviour of the vp9 loopfilter is to work on 8-pixel
  2616. // edges. This means that for UV, we work on two subsampled blocks at
  2617. // a time, and we only use the topleft block's mode information to set
  2618. // things like block strength. Thus, for any block size smaller than
  2619. // 16x16, ignore the odd portion of the block.
  2620. if (tx == TX_4X4 && is_uv) {
  2621. if (h == 1) {
  2622. if (row_and_7 & 1)
  2623. return;
  2624. if (!row_end)
  2625. h += 1;
  2626. }
  2627. if (w == 1) {
  2628. if (col_and_7 & 1)
  2629. return;
  2630. if (!col_end)
  2631. w += 1;
  2632. }
  2633. }
  2634. if (tx == TX_4X4 && !skip_inter) {
  2635. int t = 1 << col_and_7, m_col = (t << w) - t, y;
  2636. int m_col_odd = (t << (w - 1)) - t;
  2637. // on 32-px edges, use the 8-px wide loopfilter; else, use 4-px wide
  2638. if (is_uv) {
  2639. int m_row_8 = m_col & 0x01, m_row_4 = m_col - m_row_8;
  2640. for (y = row_and_7; y < h + row_and_7; y++) {
  2641. int col_mask_id = 2 - !(y & 7);
  2642. lflvl->mask[is_uv][0][y][1] |= m_row_8;
  2643. lflvl->mask[is_uv][0][y][2] |= m_row_4;
  2644. // for odd lines, if the odd col is not being filtered,
  2645. // skip odd row also:
  2646. // .---. <-- a
  2647. // | |
  2648. // |___| <-- b
  2649. // ^ ^
  2650. // c d
  2651. //
  2652. // if a/c are even row/col and b/d are odd, and d is skipped,
  2653. // e.g. right edge of size-66x66.webm, then skip b also (bug)
  2654. if ((col_end & 1) && (y & 1)) {
  2655. lflvl->mask[is_uv][1][y][col_mask_id] |= m_col_odd;
  2656. } else {
  2657. lflvl->mask[is_uv][1][y][col_mask_id] |= m_col;
  2658. }
  2659. }
  2660. } else {
  2661. int m_row_8 = m_col & 0x11, m_row_4 = m_col - m_row_8;
  2662. for (y = row_and_7; y < h + row_and_7; y++) {
  2663. int col_mask_id = 2 - !(y & 3);
  2664. lflvl->mask[is_uv][0][y][1] |= m_row_8; // row edge
  2665. lflvl->mask[is_uv][0][y][2] |= m_row_4;
  2666. lflvl->mask[is_uv][1][y][col_mask_id] |= m_col; // col edge
  2667. lflvl->mask[is_uv][0][y][3] |= m_col;
  2668. lflvl->mask[is_uv][1][y][3] |= m_col;
  2669. }
  2670. }
  2671. } else {
  2672. int y, t = 1 << col_and_7, m_col = (t << w) - t;
  2673. if (!skip_inter) {
  2674. int mask_id = (tx == TX_8X8);
  2675. int l2 = tx + is_uv - 1, step1d = 1 << l2;
  2676. static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
  2677. int m_row = m_col & masks[l2];
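// masks[] keeps only the bit of the first 8px column of each
// transform block: every bit for 8px steps (0xff), every second bit
// for 16px (0x55), every fourth for 32px (0x11) and a single bit for
// 64px (0x01), so m_row marks the left edge of each tx block in the
// row.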
2678. // at odd UV tx16/tx32 col/row loopfilter edges, force the 8px-wide
2679. // loopfilter to prevent filtering past the visible edge
  2680. if (is_uv && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
  2681. int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
  2682. int m_row_8 = m_row - m_row_16;
  2683. for (y = row_and_7; y < h + row_and_7; y++) {
  2684. lflvl->mask[is_uv][0][y][0] |= m_row_16;
  2685. lflvl->mask[is_uv][0][y][1] |= m_row_8;
  2686. }
  2687. } else {
  2688. for (y = row_and_7; y < h + row_and_7; y++)
  2689. lflvl->mask[is_uv][0][y][mask_id] |= m_row;
  2690. }
  2691. if (is_uv && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
  2692. for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
  2693. lflvl->mask[is_uv][1][y][0] |= m_col;
  2694. if (y - row_and_7 == h - 1)
  2695. lflvl->mask[is_uv][1][y][1] |= m_col;
  2696. } else {
  2697. for (y = row_and_7; y < h + row_and_7; y += step1d)
  2698. lflvl->mask[is_uv][1][y][mask_id] |= m_col;
  2699. }
  2700. } else if (tx != TX_4X4) {
  2701. int mask_id;
  2702. mask_id = (tx == TX_8X8) || (is_uv && h == 1);
  2703. lflvl->mask[is_uv][1][row_and_7][mask_id] |= m_col;
  2704. mask_id = (tx == TX_8X8) || (is_uv && w == 1);
  2705. for (y = row_and_7; y < h + row_and_7; y++)
  2706. lflvl->mask[is_uv][0][y][mask_id] |= t;
  2707. } else if (is_uv) {
  2708. int t8 = t & 0x01, t4 = t - t8;
  2709. for (y = row_and_7; y < h + row_and_7; y++) {
  2710. lflvl->mask[is_uv][0][y][2] |= t4;
  2711. lflvl->mask[is_uv][0][y][1] |= t8;
  2712. }
  2713. lflvl->mask[is_uv][1][row_and_7][2 - !(row_and_7 & 7)] |= m_col;
  2714. } else {
  2715. int t8 = t & 0x11, t4 = t - t8;
  2716. for (y = row_and_7; y < h + row_and_7; y++) {
  2717. lflvl->mask[is_uv][0][y][2] |= t4;
  2718. lflvl->mask[is_uv][0][y][1] |= t8;
  2719. }
  2720. lflvl->mask[is_uv][1][row_and_7][2 - !(row_and_7 & 3)] |= m_col;
  2721. }
  2722. }
  2723. }
  2724. static void decode_b(AVCodecContext *ctx, int row, int col,
  2725. struct VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
  2726. enum BlockLevel bl, enum BlockPartition bp)
  2727. {
  2728. VP9Context *s = ctx->priv_data;
  2729. VP9Block *b = s->b;
  2730. enum BlockSize bs = bl * 3 + bp;
  2731. int w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl;
  2732. int emu[2];
  2733. AVFrame *f = s->frames[CUR_FRAME].tf.f;
  2734. s->row = row;
  2735. s->row7 = row & 7;
  2736. s->col = col;
  2737. s->col7 = col & 7;
  2738. s->min_mv.x = -(128 + col * 64);
  2739. s->min_mv.y = -(128 + row * 64);
  2740. s->max_mv.x = 128 + (s->cols - col - w4) * 64;
  2741. s->max_mv.y = 128 + (s->rows - row - h4) * 64;
  2742. if (s->pass < 2) {
  2743. b->bs = bs;
  2744. b->bl = bl;
  2745. b->bp = bp;
  2746. decode_mode(ctx);
  2747. b->uvtx = b->tx - (w4 * 2 == (1 << b->tx) || h4 * 2 == (1 << b->tx));
  2748. if (!b->skip) {
  2749. decode_coeffs(ctx);
  2750. } else {
  2751. int row7 = s->row7;
  2752. #define SPLAT_ZERO_CTX(v, n) \
  2753. switch (n) { \
  2754. case 1: v = 0; break; \
  2755. case 2: AV_ZERO16(&v); break; \
  2756. case 4: AV_ZERO32(&v); break; \
  2757. case 8: AV_ZERO64(&v); break; \
  2758. case 16: AV_ZERO128(&v); break; \
  2759. }
  2760. #define SPLAT_ZERO_YUV(dir, var, off, n) \
  2761. do { \
  2762. SPLAT_ZERO_CTX(s->dir##_y_##var[off * 2], n * 2); \
  2763. SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off], n); \
  2764. SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off], n); \
  2765. } while (0)
  2766. switch (w4) {
  2767. case 1: SPLAT_ZERO_YUV(above, nnz_ctx, col, 1); break;
  2768. case 2: SPLAT_ZERO_YUV(above, nnz_ctx, col, 2); break;
  2769. case 4: SPLAT_ZERO_YUV(above, nnz_ctx, col, 4); break;
  2770. case 8: SPLAT_ZERO_YUV(above, nnz_ctx, col, 8); break;
  2771. }
  2772. switch (h4) {
  2773. case 1: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 1); break;
  2774. case 2: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 2); break;
  2775. case 4: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 4); break;
  2776. case 8: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 8); break;
  2777. }
  2778. }
  2779. if (s->pass == 1) {
  2780. s->b++;
  2781. s->block += w4 * h4 * 64;
  2782. s->uvblock[0] += w4 * h4 * 16;
  2783. s->uvblock[1] += w4 * h4 * 16;
  2784. s->eob += 4 * w4 * h4;
  2785. s->uveob[0] += w4 * h4;
  2786. s->uveob[1] += w4 * h4;
  2787. return;
  2788. }
  2789. }
2790. // use the emulated overhang buffers if the stride of the target buffer
2791. // can't hold the block; this allows supporting emu-edge and so on even
2792. // when we have large block overhangs
  2793. emu[0] = (col + w4) * 8 > f->linesize[0] ||
  2794. (row + h4) > s->rows;
  2795. emu[1] = (col + w4) * 4 > f->linesize[1] ||
  2796. (row + h4) > s->rows;
  2797. if (emu[0]) {
  2798. s->dst[0] = s->tmp_y;
  2799. s->y_stride = 64;
  2800. } else {
  2801. s->dst[0] = f->data[0] + yoff;
  2802. s->y_stride = f->linesize[0];
  2803. }
  2804. if (emu[1]) {
  2805. s->dst[1] = s->tmp_uv[0];
  2806. s->dst[2] = s->tmp_uv[1];
  2807. s->uv_stride = 32;
  2808. } else {
  2809. s->dst[1] = f->data[1] + uvoff;
  2810. s->dst[2] = f->data[2] + uvoff;
  2811. s->uv_stride = f->linesize[1];
  2812. }
  2813. if (b->intra) {
  2814. intra_recon(ctx, yoff, uvoff);
  2815. } else {
  2816. inter_recon(ctx);
  2817. }
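// copy back from the temporary overhang buffers: the visible width is
// decomposed into power-of-two chunks (64, 32, ..., 4 for luma; half
// that for chroma) and each chunk is copied with the matching
// full-pel (0,0) MC function, which here acts as a plain
// width-specific copy.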
  2818. if (emu[0]) {
  2819. int w = FFMIN(s->cols - col, w4) * 8, h = FFMIN(s->rows - row, h4) * 8, n, o = 0;
  2820. for (n = 0; o < w; n++) {
  2821. int bw = 64 >> n;
  2822. av_assert2(n <= 4);
  2823. if (w & bw) {
  2824. s->dsp.mc[n][0][0][0][0](f->data[0] + yoff + o, f->linesize[0],
  2825. s->tmp_y + o, 64, h, 0, 0);
  2826. o += bw;
  2827. }
  2828. }
  2829. }
  2830. if (emu[1]) {
  2831. int w = FFMIN(s->cols - col, w4) * 4, h = FFMIN(s->rows - row, h4) * 4, n, o = 0;
  2832. for (n = 1; o < w; n++) {
  2833. int bw = 64 >> n;
  2834. av_assert2(n <= 4);
  2835. if (w & bw) {
  2836. s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o, f->linesize[1],
  2837. s->tmp_uv[0] + o, 32, h, 0, 0);
  2838. s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o, f->linesize[2],
  2839. s->tmp_uv[1] + o, 32, h, 0, 0);
  2840. o += bw;
  2841. }
  2842. }
  2843. }
  2844. // pick filter level and find edges to apply filter to
  2845. if (s->filter.level &&
  2846. (lvl = s->segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1]
  2847. [b->mode[3] != ZEROMV]) > 0) {
  2848. int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4);
  2849. int skip_inter = !b->intra && b->skip, col7 = s->col7, row7 = s->row7;
  2850. setctx_2d(&lflvl->level[row7 * 8 + col7], w4, h4, 8, lvl);
  2851. mask_edges(lflvl, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
  2852. mask_edges(lflvl, 1, row7, col7, x_end, y_end,
  2853. s->cols & 1 && col + w4 >= s->cols ? s->cols & 7 : 0,
  2854. s->rows & 1 && row + h4 >= s->rows ? s->rows & 7 : 0,
  2855. b->uvtx, skip_inter);
  2856. if (!s->filter.lim_lut[lvl]) {
  2857. int sharp = s->filter.sharpness;
  2858. int limit = lvl;
  2859. if (sharp > 0) {
  2860. limit >>= (sharp + 3) >> 2;
  2861. limit = FFMIN(limit, 9 - sharp);
  2862. }
  2863. limit = FFMAX(limit, 1);
  2864. s->filter.lim_lut[lvl] = limit;
  2865. s->filter.mblim_lut[lvl] = 2 * (lvl + 2) + limit;
  2866. }
  2867. }
  2868. if (s->pass == 2) {
  2869. s->b++;
  2870. s->block += w4 * h4 * 64;
  2871. s->uvblock[0] += w4 * h4 * 16;
  2872. s->uvblock[1] += w4 * h4 * 16;
  2873. s->eob += 4 * w4 * h4;
  2874. s->uveob[0] += w4 * h4;
  2875. s->uveob[1] += w4 * h4;
  2876. }
  2877. }
  2878. static void decode_sb(AVCodecContext *ctx, int row, int col, struct VP9Filter *lflvl,
  2879. ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
  2880. {
  2881. VP9Context *s = ctx->priv_data;
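// c is a 2-bit context, one bit each from the above and left
// partition contexts at this level (roughly: "that neighbour was
// split at least this far down"); it selects one of four probability
// sets per level, with keyframes using fixed default probabilities.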
  2882. int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
  2883. (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
  2884. const uint8_t *p = s->keyframe ? vp9_default_kf_partition_probs[bl][c] :
  2885. s->prob.p.partition[bl][c];
  2886. enum BlockPartition bp;
  2887. ptrdiff_t hbs = 4 >> bl;
  2888. AVFrame *f = s->frames[CUR_FRAME].tf.f;
  2889. ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
  2890. if (bl == BL_8X8) {
  2891. bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
  2892. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2893. } else if (col + hbs < s->cols) { // FIXME why not <=?
  2894. if (row + hbs < s->rows) { // FIXME why not <=?
  2895. bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
  2896. switch (bp) {
  2897. case PARTITION_NONE:
  2898. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2899. break;
  2900. case PARTITION_H:
  2901. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2902. yoff += hbs * 8 * y_stride;
  2903. uvoff += hbs * 4 * uv_stride;
  2904. decode_b(ctx, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
  2905. break;
  2906. case PARTITION_V:
  2907. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2908. yoff += hbs * 8;
  2909. uvoff += hbs * 4;
  2910. decode_b(ctx, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
  2911. break;
  2912. case PARTITION_SPLIT:
  2913. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2914. decode_sb(ctx, row, col + hbs, lflvl,
  2915. yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
  2916. yoff += hbs * 8 * y_stride;
  2917. uvoff += hbs * 4 * uv_stride;
  2918. decode_sb(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
  2919. decode_sb(ctx, row + hbs, col + hbs, lflvl,
  2920. yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
  2921. break;
  2922. default:
  2923. av_assert0(0);
  2924. }
  2925. } else if (vp56_rac_get_prob_branchy(&s->c, p[1])) {
  2926. bp = PARTITION_SPLIT;
  2927. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2928. decode_sb(ctx, row, col + hbs, lflvl,
  2929. yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
  2930. } else {
  2931. bp = PARTITION_H;
  2932. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2933. }
  2934. } else if (row + hbs < s->rows) { // FIXME why not <=?
  2935. if (vp56_rac_get_prob_branchy(&s->c, p[2])) {
  2936. bp = PARTITION_SPLIT;
  2937. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2938. yoff += hbs * 8 * y_stride;
  2939. uvoff += hbs * 4 * uv_stride;
  2940. decode_sb(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
  2941. } else {
  2942. bp = PARTITION_V;
  2943. decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
  2944. }
  2945. } else {
  2946. bp = PARTITION_SPLIT;
  2947. decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2948. }
  2949. s->counts.partition[bl][c][bp]++;
  2950. }
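// Same tree walk as decode_sb(), but for the second decoding pass:
// the partition decisions were already read and stored in the
// VP9Block array during pass 1, so instead of parsing the bitstream
// this replays the stored b->bl/b->bp to revisit the blocks in the
// same order.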
  2951. static void decode_sb_mem(AVCodecContext *ctx, int row, int col, struct VP9Filter *lflvl,
  2952. ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
  2953. {
  2954. VP9Context *s = ctx->priv_data;
  2955. VP9Block *b = s->b;
  2956. ptrdiff_t hbs = 4 >> bl;
  2957. AVFrame *f = s->frames[CUR_FRAME].tf.f;
  2958. ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
  2959. if (bl == BL_8X8) {
  2960. av_assert2(b->bl == BL_8X8);
  2961. decode_b(ctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
  2962. } else if (s->b->bl == bl) {
  2963. decode_b(ctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
  2964. if (b->bp == PARTITION_H && row + hbs < s->rows) {
  2965. yoff += hbs * 8 * y_stride;
  2966. uvoff += hbs * 4 * uv_stride;
  2967. decode_b(ctx, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
  2968. } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
  2969. yoff += hbs * 8;
  2970. uvoff += hbs * 4;
  2971. decode_b(ctx, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
  2972. }
  2973. } else {
  2974. decode_sb_mem(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
  2975. if (col + hbs < s->cols) { // FIXME why not <=?
  2976. if (row + hbs < s->rows) {
  2977. decode_sb_mem(ctx, row, col + hbs, lflvl, yoff + 8 * hbs,
  2978. uvoff + 4 * hbs, bl + 1);
  2979. yoff += hbs * 8 * y_stride;
  2980. uvoff += hbs * 4 * uv_stride;
  2981. decode_sb_mem(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
  2982. decode_sb_mem(ctx, row + hbs, col + hbs, lflvl,
  2983. yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
  2984. } else {
  2985. yoff += hbs * 8;
  2986. uvoff += hbs * 4;
  2987. decode_sb_mem(ctx, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
  2988. }
  2989. } else if (row + hbs < s->rows) {
  2990. yoff += hbs * 8 * y_stride;
  2991. uvoff += hbs * 4 * uv_stride;
  2992. decode_sb_mem(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
  2993. }
  2994. }
  2995. }
  2996. static void loopfilter_sb(AVCodecContext *ctx, struct VP9Filter *lflvl,
  2997. int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
  2998. {
  2999. VP9Context *s = ctx->priv_data;
  3000. AVFrame *f = s->frames[CUR_FRAME].tf.f;
  3001. uint8_t *dst = f->data[0] + yoff, *lvl = lflvl->level;
  3002. ptrdiff_t ls_y = f->linesize[0], ls_uv = f->linesize[1];
  3003. int y, x, p;
3004. // FIXME to what extent can we interleave the v/h loopfilter calls? E.g.
3005. // if you think of them as acting on an 8x8 block max, we can interleave
3006. // each v/h pair within the single x loop, but that only works if we work on
3007. // 8-pixel blocks, and we won't always do that (we want at least 16px
3008. // to use SSE2 optimizations, perhaps 32 for AVX2)
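// Note on the mask layout (see struct VP9Filter): each mask[plane][dir][row]
// entry holds one bitmask per edge size (16/8/4/inner-4px), one bit per 8px
// (luma) column; hm*/vm* below OR them together so the x loop can stop as
// soon as no edge bits remain. When two adjacent 8px edge segments both need
// filtering, their E/I/H parameters are packed into the low/high byte of a
// single value and handed to loop_filter_mix2, which does both in one call.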
  3009. // filter edges between columns, Y plane (e.g. block1 | block2)
  3010. for (y = 0; y < 8; y += 2, dst += 16 * ls_y, lvl += 16) {
  3011. uint8_t *ptr = dst, *l = lvl, *hmask1 = lflvl->mask[0][0][y];
  3012. uint8_t *hmask2 = lflvl->mask[0][0][y + 1];
  3013. unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2], hm13 = hmask1[3];
  3014. unsigned hm2 = hmask2[1] | hmask2[2], hm23 = hmask2[3];
  3015. unsigned hm = hm1 | hm2 | hm13 | hm23;
  3016. for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 8, l++) {
  3017. if (hm1 & x) {
  3018. int L = *l, H = L >> 4;
  3019. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3020. if (col || x > 1) {
  3021. if (hmask1[0] & x) {
  3022. if (hmask2[0] & x) {
  3023. av_assert2(l[8] == L);
  3024. s->dsp.loop_filter_16[0](ptr, ls_y, E, I, H);
  3025. } else {
  3026. s->dsp.loop_filter_8[2][0](ptr, ls_y, E, I, H);
  3027. }
  3028. } else if (hm2 & x) {
  3029. L = l[8];
  3030. H |= (L >> 4) << 8;
  3031. E |= s->filter.mblim_lut[L] << 8;
  3032. I |= s->filter.lim_lut[L] << 8;
  3033. s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
  3034. [!!(hmask2[1] & x)]
  3035. [0](ptr, ls_y, E, I, H);
  3036. } else {
  3037. s->dsp.loop_filter_8[!!(hmask1[1] & x)]
  3038. [0](ptr, ls_y, E, I, H);
  3039. }
  3040. }
  3041. } else if (hm2 & x) {
  3042. int L = l[8], H = L >> 4;
  3043. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3044. if (col || x > 1) {
  3045. s->dsp.loop_filter_8[!!(hmask2[1] & x)]
  3046. [0](ptr + 8 * ls_y, ls_y, E, I, H);
  3047. }
  3048. }
  3049. if (hm13 & x) {
  3050. int L = *l, H = L >> 4;
  3051. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3052. if (hm23 & x) {
  3053. L = l[8];
  3054. H |= (L >> 4) << 8;
  3055. E |= s->filter.mblim_lut[L] << 8;
  3056. I |= s->filter.lim_lut[L] << 8;
  3057. s->dsp.loop_filter_mix2[0][0][0](ptr + 4, ls_y, E, I, H);
  3058. } else {
  3059. s->dsp.loop_filter_8[0][0](ptr + 4, ls_y, E, I, H);
  3060. }
  3061. } else if (hm23 & x) {
  3062. int L = l[8], H = L >> 4;
  3063. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3064. s->dsp.loop_filter_8[0][0](ptr + 8 * ls_y + 4, ls_y, E, I, H);
  3065. }
  3066. }
  3067. }
3068. //                                           block1
3069. // filter edges between rows, Y plane (e.g. ------)
3070. //                                           block2
  3071. dst = f->data[0] + yoff;
  3072. lvl = lflvl->level;
  3073. for (y = 0; y < 8; y++, dst += 8 * ls_y, lvl += 8) {
  3074. uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[0][1][y];
  3075. unsigned vm = vmask[0] | vmask[1] | vmask[2], vm3 = vmask[3];
  3076. for (x = 1; vm & ~(x - 1); x <<= 2, ptr += 16, l += 2) {
  3077. if (row || y) {
  3078. if (vm & x) {
  3079. int L = *l, H = L >> 4;
  3080. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3081. if (vmask[0] & x) {
  3082. if (vmask[0] & (x << 1)) {
  3083. av_assert2(l[1] == L);
  3084. s->dsp.loop_filter_16[1](ptr, ls_y, E, I, H);
  3085. } else {
  3086. s->dsp.loop_filter_8[2][1](ptr, ls_y, E, I, H);
  3087. }
  3088. } else if (vm & (x << 1)) {
  3089. L = l[1];
  3090. H |= (L >> 4) << 8;
  3091. E |= s->filter.mblim_lut[L] << 8;
  3092. I |= s->filter.lim_lut[L] << 8;
  3093. s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
  3094. [!!(vmask[1] & (x << 1))]
  3095. [1](ptr, ls_y, E, I, H);
  3096. } else {
  3097. s->dsp.loop_filter_8[!!(vmask[1] & x)]
  3098. [1](ptr, ls_y, E, I, H);
  3099. }
  3100. } else if (vm & (x << 1)) {
  3101. int L = l[1], H = L >> 4;
  3102. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3103. s->dsp.loop_filter_8[!!(vmask[1] & (x << 1))]
  3104. [1](ptr + 8, ls_y, E, I, H);
  3105. }
  3106. }
  3107. if (vm3 & x) {
  3108. int L = *l, H = L >> 4;
  3109. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3110. if (vm3 & (x << 1)) {
  3111. L = l[1];
  3112. H |= (L >> 4) << 8;
  3113. E |= s->filter.mblim_lut[L] << 8;
  3114. I |= s->filter.lim_lut[L] << 8;
  3115. s->dsp.loop_filter_mix2[0][0][1](ptr + ls_y * 4, ls_y, E, I, H);
  3116. } else {
  3117. s->dsp.loop_filter_8[0][1](ptr + ls_y * 4, ls_y, E, I, H);
  3118. }
  3119. } else if (vm3 & (x << 1)) {
  3120. int L = l[1], H = L >> 4;
  3121. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3122. s->dsp.loop_filter_8[0][1](ptr + ls_y * 4 + 8, ls_y, E, I, H);
  3123. }
  3124. }
  3125. }
  3126. // same principle but for U/V planes
  3127. for (p = 0; p < 2; p++) {
  3128. lvl = lflvl->level;
  3129. dst = f->data[1 + p] + uvoff;
  3130. for (y = 0; y < 8; y += 4, dst += 16 * ls_uv, lvl += 32) {
  3131. uint8_t *ptr = dst, *l = lvl, *hmask1 = lflvl->mask[1][0][y];
  3132. uint8_t *hmask2 = lflvl->mask[1][0][y + 2];
  3133. unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2];
  3134. unsigned hm2 = hmask2[1] | hmask2[2], hm = hm1 | hm2;
  3135. for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 4) {
  3136. if (col || x > 1) {
  3137. if (hm1 & x) {
  3138. int L = *l, H = L >> 4;
  3139. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3140. if (hmask1[0] & x) {
  3141. if (hmask2[0] & x) {
  3142. av_assert2(l[16] == L);
  3143. s->dsp.loop_filter_16[0](ptr, ls_uv, E, I, H);
  3144. } else {
  3145. s->dsp.loop_filter_8[2][0](ptr, ls_uv, E, I, H);
  3146. }
  3147. } else if (hm2 & x) {
  3148. L = l[16];
  3149. H |= (L >> 4) << 8;
  3150. E |= s->filter.mblim_lut[L] << 8;
  3151. I |= s->filter.lim_lut[L] << 8;
  3152. s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
  3153. [!!(hmask2[1] & x)]
  3154. [0](ptr, ls_uv, E, I, H);
  3155. } else {
  3156. s->dsp.loop_filter_8[!!(hmask1[1] & x)]
  3157. [0](ptr, ls_uv, E, I, H);
  3158. }
  3159. } else if (hm2 & x) {
  3160. int L = l[16], H = L >> 4;
  3161. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3162. s->dsp.loop_filter_8[!!(hmask2[1] & x)]
  3163. [0](ptr + 8 * ls_uv, ls_uv, E, I, H);
  3164. }
  3165. }
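// lvl holds one level per 8x8 luma block, while the mask bits here are at
// 4px chroma granularity, so the level pointer advances by two block columns
// on every other iteration (x & 0xAA matches x = 2, 8, 32, 128).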
  3166. if (x & 0xAA)
  3167. l += 2;
  3168. }
  3169. }
  3170. lvl = lflvl->level;
  3171. dst = f->data[1 + p] + uvoff;
  3172. for (y = 0; y < 8; y++, dst += 4 * ls_uv) {
  3173. uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[1][1][y];
  3174. unsigned vm = vmask[0] | vmask[1] | vmask[2];
  3175. for (x = 1; vm & ~(x - 1); x <<= 4, ptr += 16, l += 4) {
  3176. if (row || y) {
  3177. if (vm & x) {
  3178. int L = *l, H = L >> 4;
  3179. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3180. if (vmask[0] & x) {
  3181. if (vmask[0] & (x << 2)) {
  3182. av_assert2(l[2] == L);
  3183. s->dsp.loop_filter_16[1](ptr, ls_uv, E, I, H);
  3184. } else {
  3185. s->dsp.loop_filter_8[2][1](ptr, ls_uv, E, I, H);
  3186. }
  3187. } else if (vm & (x << 2)) {
  3188. L = l[2];
  3189. H |= (L >> 4) << 8;
  3190. E |= s->filter.mblim_lut[L] << 8;
  3191. I |= s->filter.lim_lut[L] << 8;
  3192. s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
  3193. [!!(vmask[1] & (x << 2))]
  3194. [1](ptr, ls_uv, E, I, H);
  3195. } else {
  3196. s->dsp.loop_filter_8[!!(vmask[1] & x)]
  3197. [1](ptr, ls_uv, E, I, H);
  3198. }
  3199. } else if (vm & (x << 2)) {
  3200. int L = l[2], H = L >> 4;
  3201. int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
  3202. s->dsp.loop_filter_8[!!(vmask[1] & (x << 2))]
  3203. [1](ptr + 8, ls_uv, E, I, H);
  3204. }
  3205. }
  3206. }
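// likewise, advance the level pointer by two 8-entry block rows on every
// other iteration, since each y iteration covers 4 chroma rows at 4px
// granularity.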
  3207. if (y & 1)
  3208. lvl += 16;
  3209. }
  3210. }
  3211. }
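// Split n superblocks into 1 << log2_n tiles and return the bounds of tile
// `idx` in 8x8-block units (hence the << 3). Worked example with
// illustrative numbers: n = 9 sb columns and log2_n = 1 (two tile columns)
// yields tile 0 -> blocks [0, 32) and tile 1 -> blocks [32, 72).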
  3212. static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
  3213. {
  3214. int sb_start = ( idx * n) >> log2_n;
  3215. int sb_end = ((idx + 1) * n) >> log2_n;
  3216. *start = FFMIN(sb_start, n) << 3;
  3217. *end = FFMIN(sb_end, n) << 3;
  3218. }
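// Blend the stored probability p1 toward the maximum-likelihood estimate
// p2 = 256 * ct0 / (ct0 + ct1) (rounded, clipped to [1, 255]); the blend
// weight update_factor/256 is scaled down linearly when fewer than
// max_count events were counted. Worked example with illustrative numbers:
// p1 = 128, ct0 = 20, ct1 = 10, max_count = 20, update_factor = 128: ct = 30
// saturates at max_count, so the factor stays 128; p2 = (20 * 256 + 15) / 30
// = 171, giving 128 + (((171 - 128) * 128 + 128) >> 8) = 150, i.e. halfway
// toward p2.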
  3219. static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
  3220. int max_count, int update_factor)
  3221. {
  3222. unsigned ct = ct0 + ct1, p2, p1;
  3223. if (!ct)
  3224. return;
  3225. update_factor = FASTDIV(update_factor * FFMIN(ct, max_count), max_count);
  3226. p1 = *p;
  3227. p2 = ((((int64_t) ct0) << 8) + (ct >> 1)) / ct;
  3228. p2 = av_clip(p2, 1, 255);
  3229. // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
  3230. *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
  3231. }
  3232. static void adapt_probs(VP9Context *s)
  3233. {
  3234. int i, j, k, l, m;
  3235. prob_context *p = &s->prob_ctx[s->framectxid].p;
  3236. int uf = (s->keyframe || s->intraonly || !s->last_keyframe) ? 112 : 128;
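// coefficient probabilities adapt with factor 112, except on an inter frame
// directly following a keyframe, where the faster factor 128 is used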
  3237. // coefficients
  3238. for (i = 0; i < 4; i++)
  3239. for (j = 0; j < 2; j++)
  3240. for (k = 0; k < 2; k++)
  3241. for (l = 0; l < 6; l++)
  3242. for (m = 0; m < 6; m++) {
  3243. uint8_t *pp = s->prob_ctx[s->framectxid].coef[i][j][k][l][m];
  3244. unsigned *e = s->counts.eob[i][j][k][l][m];
  3245. unsigned *c = s->counts.coef[i][j][k][l][m];
3246. if (l == 0 && m >= 3) // the DC band (l == 0) only has 3 position contexts
  3247. break;
  3248. adapt_prob(&pp[0], e[0], e[1], 24, uf);
  3249. adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
  3250. adapt_prob(&pp[2], c[1], c[2], 24, uf);
  3251. }
  3252. if (s->keyframe || s->intraonly) {
  3253. memcpy(p->skip, s->prob.p.skip, sizeof(p->skip));
  3254. memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
  3255. memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
  3256. memcpy(p->tx8p, s->prob.p.tx8p, sizeof(p->tx8p));
  3257. return;
  3258. }
  3259. // skip flag
  3260. for (i = 0; i < 3; i++)
  3261. adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);
  3262. // intra/inter flag
  3263. for (i = 0; i < 4; i++)
  3264. adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);
  3265. // comppred flag
  3266. if (s->comppredmode == PRED_SWITCHABLE) {
  3267. for (i = 0; i < 5; i++)
  3268. adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
  3269. }
  3270. // reference frames
  3271. if (s->comppredmode != PRED_SINGLEREF) {
  3272. for (i = 0; i < 5; i++)
  3273. adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
  3274. s->counts.comp_ref[i][1], 20, 128);
  3275. }
  3276. if (s->comppredmode != PRED_COMPREF) {
  3277. for (i = 0; i < 5; i++) {
  3278. uint8_t *pp = p->single_ref[i];
  3279. unsigned (*c)[2] = s->counts.single_ref[i];
  3280. adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
  3281. adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
  3282. }
  3283. }
  3284. // block partitioning
  3285. for (i = 0; i < 4; i++)
  3286. for (j = 0; j < 4; j++) {
  3287. uint8_t *pp = p->partition[i][j];
  3288. unsigned *c = s->counts.partition[i][j];
  3289. adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  3290. adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  3291. adapt_prob(&pp[2], c[2], c[3], 20, 128);
  3292. }
  3293. // tx size
  3294. if (s->txfmmode == TX_SWITCHABLE) {
  3295. for (i = 0; i < 2; i++) {
  3296. unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];
  3297. adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
  3298. adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
  3299. adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
  3300. adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
  3301. adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
  3302. adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
  3303. }
  3304. }
  3305. // interpolation filter
  3306. if (s->filtermode == FILTER_SWITCHABLE) {
  3307. for (i = 0; i < 4; i++) {
  3308. uint8_t *pp = p->filter[i];
  3309. unsigned *c = s->counts.filter[i];
  3310. adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
  3311. adapt_prob(&pp[1], c[1], c[2], 20, 128);
  3312. }
  3313. }
  3314. // inter modes
  3315. for (i = 0; i < 7; i++) {
  3316. uint8_t *pp = p->mv_mode[i];
  3317. unsigned *c = s->counts.mv_mode[i];
  3318. adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
  3319. adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
  3320. adapt_prob(&pp[2], c[1], c[3], 20, 128);
  3321. }
  3322. // mv joints
  3323. {
  3324. uint8_t *pp = p->mv_joint;
  3325. unsigned *c = s->counts.mv_joint;
  3326. adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  3327. adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  3328. adapt_prob(&pp[2], c[2], c[3], 20, 128);
  3329. }
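// The groups below each walk down a binary probability tree: `sum` starts
// as the total count of all remaining symbols and is reduced level by
// level, so every adapt_prob() call sees left-vs-right subtree counts for
// one tree node.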
  3330. // mv components
  3331. for (i = 0; i < 2; i++) {
  3332. uint8_t *pp;
  3333. unsigned *c, (*c2)[2], sum;
  3334. adapt_prob(&p->mv_comp[i].sign, s->counts.mv_comp[i].sign[0],
  3335. s->counts.mv_comp[i].sign[1], 20, 128);
  3336. pp = p->mv_comp[i].classes;
  3337. c = s->counts.mv_comp[i].classes;
  3338. sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
  3339. adapt_prob(&pp[0], c[0], sum, 20, 128);
  3340. sum -= c[1];
  3341. adapt_prob(&pp[1], c[1], sum, 20, 128);
  3342. sum -= c[2] + c[3];
  3343. adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
  3344. adapt_prob(&pp[3], c[2], c[3], 20, 128);
  3345. sum -= c[4] + c[5];
  3346. adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
  3347. adapt_prob(&pp[5], c[4], c[5], 20, 128);
  3348. sum -= c[6];
  3349. adapt_prob(&pp[6], c[6], sum, 20, 128);
  3350. adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
  3351. adapt_prob(&pp[8], c[7], c[8], 20, 128);
  3352. adapt_prob(&pp[9], c[9], c[10], 20, 128);
  3353. adapt_prob(&p->mv_comp[i].class0, s->counts.mv_comp[i].class0[0],
  3354. s->counts.mv_comp[i].class0[1], 20, 128);
  3355. pp = p->mv_comp[i].bits;
  3356. c2 = s->counts.mv_comp[i].bits;
  3357. for (j = 0; j < 10; j++)
  3358. adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);
  3359. for (j = 0; j < 2; j++) {
  3360. pp = p->mv_comp[i].class0_fp[j];
  3361. c = s->counts.mv_comp[i].class0_fp[j];
  3362. adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  3363. adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  3364. adapt_prob(&pp[2], c[2], c[3], 20, 128);
  3365. }
  3366. pp = p->mv_comp[i].fp;
  3367. c = s->counts.mv_comp[i].fp;
  3368. adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
  3369. adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
  3370. adapt_prob(&pp[2], c[2], c[3], 20, 128);
  3371. if (s->highprecisionmvs) {
  3372. adapt_prob(&p->mv_comp[i].class0_hp, s->counts.mv_comp[i].class0_hp[0],
  3373. s->counts.mv_comp[i].class0_hp[1], 20, 128);
  3374. adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0],
  3375. s->counts.mv_comp[i].hp[1], 20, 128);
  3376. }
  3377. }
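// intra mode adaptation follows the same tree-walk pattern: DC against
// everything else at the root, then TM, vertical, a horizontal/diagonal
// subtree, and so on, mirroring the intra mode tree used during decoding.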
  3378. // y intra modes
  3379. for (i = 0; i < 4; i++) {
  3380. uint8_t *pp = p->y_mode[i];
  3381. unsigned *c = s->counts.y_mode[i], sum, s2;
  3382. sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
  3383. adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
  3384. sum -= c[TM_VP8_PRED];
  3385. adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
  3386. sum -= c[VERT_PRED];
  3387. adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
  3388. s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
  3389. sum -= s2;
  3390. adapt_prob(&pp[3], s2, sum, 20, 128);
  3391. s2 -= c[HOR_PRED];
  3392. adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
  3393. adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
  3394. sum -= c[DIAG_DOWN_LEFT_PRED];
  3395. adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
  3396. sum -= c[VERT_LEFT_PRED];
  3397. adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
  3398. adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
  3399. }
  3400. // uv intra modes
  3401. for (i = 0; i < 10; i++) {
  3402. uint8_t *pp = p->uv_mode[i];
  3403. unsigned *c = s->counts.uv_mode[i], sum, s2;
  3404. sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
  3405. adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
  3406. sum -= c[TM_VP8_PRED];
  3407. adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
  3408. sum -= c[VERT_PRED];
  3409. adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
  3410. s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
  3411. sum -= s2;
  3412. adapt_prob(&pp[3], s2, sum, 20, 128);
  3413. s2 -= c[HOR_PRED];
  3414. adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
  3415. adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
  3416. sum -= c[DIAG_DOWN_LEFT_PRED];
  3417. adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
  3418. sum -= c[VERT_LEFT_PRED];
  3419. adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
  3420. adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
  3421. }
  3422. }
  3423. static void free_buffers(VP9Context *s)
  3424. {
  3425. av_freep(&s->intra_pred_data[0]);
  3426. av_freep(&s->b_base);
  3427. av_freep(&s->block_base);
  3428. }
  3429. static av_cold int vp9_decode_free(AVCodecContext *ctx)
  3430. {
  3431. VP9Context *s = ctx->priv_data;
  3432. int i;
  3433. for (i = 0; i < 2; i++) {
  3434. if (s->frames[i].tf.f->data[0])
  3435. vp9_unref_frame(ctx, &s->frames[i]);
  3436. av_frame_free(&s->frames[i].tf.f);
  3437. }
  3438. for (i = 0; i < 8; i++) {
  3439. if (s->refs[i].f->data[0])
  3440. ff_thread_release_buffer(ctx, &s->refs[i]);
  3441. av_frame_free(&s->refs[i].f);
  3442. if (s->next_refs[i].f->data[0])
  3443. ff_thread_release_buffer(ctx, &s->next_refs[i]);
  3444. av_frame_free(&s->next_refs[i].f);
  3445. }
  3446. free_buffers(s);
  3447. av_freep(&s->c_b);
  3448. s->c_b_size = 0;
  3449. return 0;
  3450. }
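// Top-level frame decode. decode_frame_header() returns the number of
// header bytes consumed; a return value of 0 signals a show-existing-frame
// packet, in which case the reference indicated by `ref` is output directly
// and nothing is decoded.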
  3451. static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
  3452. int *got_frame, AVPacket *pkt)
  3453. {
  3454. const uint8_t *data = pkt->data;
  3455. int size = pkt->size;
  3456. VP9Context *s = ctx->priv_data;
  3457. int res, tile_row, tile_col, i, ref, row, col;
  3458. ptrdiff_t yoff, uvoff, ls_y, ls_uv;
  3459. AVFrame *f;
  3460. if ((res = decode_frame_header(ctx, data, size, &ref)) < 0) {
  3461. return res;
  3462. } else if (res == 0) {
  3463. if (!s->refs[ref].f->data[0]) {
  3464. av_log(ctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
  3465. return AVERROR_INVALIDDATA;
  3466. }
  3467. if ((res = av_frame_ref(frame, s->refs[ref].f)) < 0)
  3468. return res;
  3469. *got_frame = 1;
  3470. return pkt->size;
  3471. }
  3472. data += res;
  3473. size -= res;
  3474. if (s->frames[LAST_FRAME].tf.f->data[0])
  3475. vp9_unref_frame(ctx, &s->frames[LAST_FRAME]);
  3476. if (!s->keyframe && s->frames[CUR_FRAME].tf.f->data[0] &&
  3477. (res = vp9_ref_frame(ctx, &s->frames[LAST_FRAME], &s->frames[CUR_FRAME])) < 0)
  3478. return res;
  3479. if (s->frames[CUR_FRAME].tf.f->data[0])
  3480. vp9_unref_frame(ctx, &s->frames[CUR_FRAME]);
  3481. if ((res = vp9_alloc_frame(ctx, &s->frames[CUR_FRAME])) < 0)
  3482. return res;
  3483. f = s->frames[CUR_FRAME].tf.f;
  3484. f->key_frame = s->keyframe;
  3485. f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
  3486. ls_y = f->linesize[0];
3487. ls_uv = f->linesize[1];
  3488. // ref frame setup
  3489. for (i = 0; i < 8; i++) {
  3490. if (s->next_refs[i].f->data[0])
  3491. ff_thread_release_buffer(ctx, &s->next_refs[i]);
  3492. if (s->refreshrefmask & (1 << i)) {
  3493. res = ff_thread_ref_frame(&s->next_refs[i], &s->frames[CUR_FRAME].tf);
  3494. } else {
  3495. res = ff_thread_ref_frame(&s->next_refs[i], &s->refs[i]);
  3496. }
  3497. if (res < 0)
  3498. return res;
  3499. }
  3500. // main tile decode loop
  3501. memset(s->above_partition_ctx, 0, s->cols);
  3502. memset(s->above_skip_ctx, 0, s->cols);
  3503. if (s->keyframe || s->intraonly) {
  3504. memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
  3505. } else {
  3506. memset(s->above_mode_ctx, NEARESTMV, s->cols);
  3507. }
  3508. memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
  3509. memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 8);
  3510. memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 8);
  3511. memset(s->above_segpred_ctx, 0, s->cols);
  3512. s->pass = s->uses_2pass =
  3513. ctx->active_thread_type == FF_THREAD_FRAME && s->refreshctx && !s->parallelmode;
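// with frame threading plus backward adaptation (and no parallel mode) we
// decode in two passes; the do/while below then runs twice: pass 1 parses
// the bitstream and stores block data, pass 2 reconstructs from it.
// Otherwise a single pass (pass 0) does both at once.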
  3514. if ((res = update_block_buffers(ctx)) < 0) {
  3515. av_log(ctx, AV_LOG_ERROR,
  3516. "Failed to allocate block buffers\n");
  3517. return res;
  3518. }
  3519. if (s->refreshctx && s->parallelmode) {
  3520. int j, k, l, m;
  3521. for (i = 0; i < 4; i++) {
  3522. for (j = 0; j < 2; j++)
  3523. for (k = 0; k < 2; k++)
  3524. for (l = 0; l < 6; l++)
  3525. for (m = 0; m < 6; m++)
  3526. memcpy(s->prob_ctx[s->framectxid].coef[i][j][k][l][m],
  3527. s->prob.coef[i][j][k][l][m], 3);
  3528. if (s->txfmmode == i)
  3529. break;
  3530. }
  3531. s->prob_ctx[s->framectxid].p = s->prob.p;
  3532. ff_thread_finish_setup(ctx);
  3533. }
  3534. do {
  3535. yoff = uvoff = 0;
  3536. s->b = s->b_base;
  3537. s->block = s->block_base;
  3538. s->uvblock[0] = s->uvblock_base[0];
  3539. s->uvblock[1] = s->uvblock_base[1];
  3540. s->eob = s->eob_base;
  3541. s->uveob[0] = s->uveob_base[0];
  3542. s->uveob[1] = s->uveob_base[1];
  3543. for (tile_row = 0; tile_row < s->tiling.tile_rows; tile_row++) {
  3544. set_tile_offset(&s->tiling.tile_row_start, &s->tiling.tile_row_end,
  3545. tile_row, s->tiling.log2_tile_rows, s->sb_rows);
  3546. if (s->pass != 2) {
  3547. for (tile_col = 0; tile_col < s->tiling.tile_cols; tile_col++) {
  3548. int64_t tile_size;
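// every tile except the very last one is prefixed with a 32-bit big-endian
// byte count; the last tile spans the remaining packet data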
  3549. if (tile_col == s->tiling.tile_cols - 1 &&
  3550. tile_row == s->tiling.tile_rows - 1) {
  3551. tile_size = size;
  3552. } else {
  3553. tile_size = AV_RB32(data);
  3554. data += 4;
  3555. size -= 4;
  3556. }
  3557. if (tile_size > size) {
  3558. ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
  3559. return AVERROR_INVALIDDATA;
  3560. }
  3561. ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size);
  3562. if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) { // marker bit
  3563. ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
  3564. return AVERROR_INVALIDDATA;
  3565. }
  3566. data += tile_size;
  3567. size -= tile_size;
  3568. }
  3569. }
  3570. for (row = s->tiling.tile_row_start; row < s->tiling.tile_row_end;
  3571. row += 8, yoff += ls_y * 64, uvoff += ls_uv * 32) {
  3572. struct VP9Filter *lflvl_ptr = s->lflvl;
  3573. ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
  3574. for (tile_col = 0; tile_col < s->tiling.tile_cols; tile_col++) {
  3575. set_tile_offset(&s->tiling.tile_col_start, &s->tiling.tile_col_end,
  3576. tile_col, s->tiling.log2_tile_cols, s->sb_cols);
  3577. if (s->pass != 2) {
  3578. memset(s->left_partition_ctx, 0, 8);
  3579. memset(s->left_skip_ctx, 0, 8);
  3580. if (s->keyframe || s->intraonly) {
  3581. memset(s->left_mode_ctx, DC_PRED, 16);
  3582. } else {
  3583. memset(s->left_mode_ctx, NEARESTMV, 8);
  3584. }
  3585. memset(s->left_y_nnz_ctx, 0, 16);
  3586. memset(s->left_uv_nnz_ctx, 0, 16);
  3587. memset(s->left_segpred_ctx, 0, 8);
  3588. memcpy(&s->c, &s->c_b[tile_col], sizeof(s->c));
  3589. }
  3590. for (col = s->tiling.tile_col_start;
  3591. col < s->tiling.tile_col_end;
  3592. col += 8, yoff2 += 64, uvoff2 += 32, lflvl_ptr++) {
3593. // FIXME integrate with lf code (i.e. zero after each
3594. // use, similar to how invtxfm coefficients are handled)
  3595. if (s->pass != 1) {
  3596. memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
  3597. }
  3598. if (s->pass == 2) {
  3599. decode_sb_mem(ctx, row, col, lflvl_ptr,
  3600. yoff2, uvoff2, BL_64X64);
  3601. } else {
  3602. decode_sb(ctx, row, col, lflvl_ptr,
  3603. yoff2, uvoff2, BL_64X64);
  3604. }
  3605. }
  3606. if (s->pass != 2) {
  3607. memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
  3608. }
  3609. }
  3610. if (s->pass == 1) {
  3611. continue;
  3612. }
3613. // back up pre-loopfilter reconstruction data for intra
  3614. // prediction of next row of sb64s
  3615. if (row + 8 < s->rows) {
  3616. memcpy(s->intra_pred_data[0],
  3617. f->data[0] + yoff + 63 * ls_y,
  3618. 8 * s->cols);
  3619. memcpy(s->intra_pred_data[1],
  3620. f->data[1] + uvoff + 31 * ls_uv,
  3621. 4 * s->cols);
  3622. memcpy(s->intra_pred_data[2],
  3623. f->data[2] + uvoff + 31 * ls_uv,
  3624. 4 * s->cols);
  3625. }
  3626. // loopfilter one row
  3627. if (s->filter.level) {
  3628. yoff2 = yoff;
  3629. uvoff2 = uvoff;
  3630. lflvl_ptr = s->lflvl;
  3631. for (col = 0; col < s->cols;
  3632. col += 8, yoff2 += 64, uvoff2 += 32, lflvl_ptr++) {
  3633. loopfilter_sb(ctx, lflvl_ptr, row, col, yoff2, uvoff2);
  3634. }
  3635. }
3636. // FIXME maybe we can make this more fine-grained by running the
3637. // loopfilter per-block instead of after each sbrow
3638. // In fact that would also make intra pred left preparation easier?
  3639. ff_thread_report_progress(&s->frames[CUR_FRAME].tf, row >> 3, 0);
  3640. }
  3641. }
  3642. if (s->pass < 2 && s->refreshctx && !s->parallelmode) {
  3643. adapt_probs(s);
  3644. ff_thread_finish_setup(ctx);
  3645. }
  3646. } while (s->pass++ == 1);
  3647. ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
  3648. // ref frame setup
  3649. for (i = 0; i < 8; i++) {
  3650. if (s->refs[i].f->data[0])
  3651. ff_thread_release_buffer(ctx, &s->refs[i]);
  3652. ff_thread_ref_frame(&s->refs[i], &s->next_refs[i]);
  3653. }
  3654. if (!s->invisible) {
  3655. if ((res = av_frame_ref(frame, s->frames[CUR_FRAME].tf.f)) < 0)
  3656. return res;
  3657. *got_frame = 1;
  3658. }
  3659. return pkt->size;
  3660. }
  3661. static void vp9_decode_flush(AVCodecContext *ctx)
  3662. {
  3663. VP9Context *s = ctx->priv_data;
  3664. int i;
  3665. for (i = 0; i < 2; i++)
  3666. vp9_unref_frame(ctx, &s->frames[i]);
  3667. for (i = 0; i < 8; i++)
  3668. ff_thread_release_buffer(ctx, &s->refs[i]);
  3669. }
  3670. static int init_frames(AVCodecContext *ctx)
  3671. {
  3672. VP9Context *s = ctx->priv_data;
  3673. int i;
  3674. for (i = 0; i < 2; i++) {
  3675. s->frames[i].tf.f = av_frame_alloc();
  3676. if (!s->frames[i].tf.f) {
  3677. vp9_decode_free(ctx);
  3678. av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
  3679. return AVERROR(ENOMEM);
  3680. }
  3681. }
  3682. for (i = 0; i < 8; i++) {
  3683. s->refs[i].f = av_frame_alloc();
  3684. s->next_refs[i].f = av_frame_alloc();
  3685. if (!s->refs[i].f || !s->next_refs[i].f) {
  3686. vp9_decode_free(ctx);
  3687. av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
  3688. return AVERROR(ENOMEM);
  3689. }
  3690. }
  3691. return 0;
  3692. }
  3693. static av_cold int vp9_decode_init(AVCodecContext *ctx)
  3694. {
  3695. VP9Context *s = ctx->priv_data;
  3696. ctx->internal->allocate_progress = 1;
  3697. ctx->pix_fmt = AV_PIX_FMT_YUV420P;
  3698. ff_vp9dsp_init(&s->dsp);
  3699. ff_videodsp_init(&s->vdsp, 8);
  3700. s->filter.sharpness = -1;
  3701. return init_frames(ctx);
  3702. }
  3703. static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
  3704. {
  3705. return init_frames(avctx);
  3706. }
  3707. static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
  3708. {
  3709. int i, res;
  3710. VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
  3711. // detect size changes in other threads
  3712. if (s->intra_pred_data[0] &&
  3713. (!ssrc->intra_pred_data[0] || s->cols != ssrc->cols || s->rows != ssrc->rows)) {
  3714. free_buffers(s);
  3715. }
  3716. for (i = 0; i < 2; i++) {
  3717. if (s->frames[i].tf.f->data[0])
  3718. vp9_unref_frame(dst, &s->frames[i]);
  3719. if (ssrc->frames[i].tf.f->data[0]) {
  3720. if ((res = vp9_ref_frame(dst, &s->frames[i], &ssrc->frames[i])) < 0)
  3721. return res;
  3722. }
  3723. }
  3724. for (i = 0; i < 8; i++) {
  3725. if (s->refs[i].f->data[0])
  3726. ff_thread_release_buffer(dst, &s->refs[i]);
  3727. if (ssrc->next_refs[i].f->data[0]) {
  3728. if ((res = ff_thread_ref_frame(&s->refs[i], &ssrc->next_refs[i])) < 0)
  3729. return res;
  3730. }
  3731. }
  3732. s->invisible = ssrc->invisible;
  3733. s->keyframe = ssrc->keyframe;
  3734. s->uses_2pass = ssrc->uses_2pass;
  3735. memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
  3736. memcpy(&s->lf_delta, &ssrc->lf_delta, sizeof(s->lf_delta));
  3737. if (ssrc->segmentation.enabled) {
  3738. memcpy(&s->segmentation.feat, &ssrc->segmentation.feat,
  3739. sizeof(s->segmentation.feat));
  3740. }
  3741. return 0;
  3742. }
  3743. AVCodec ff_vp9_decoder = {
  3744. .name = "vp9",
  3745. .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
  3746. .type = AVMEDIA_TYPE_VIDEO,
  3747. .id = AV_CODEC_ID_VP9,
  3748. .priv_data_size = sizeof(VP9Context),
  3749. .init = vp9_decode_init,
  3750. .close = vp9_decode_free,
  3751. .decode = vp9_decode_frame,
  3752. .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
  3753. .flush = vp9_decode_flush,
  3754. .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
  3755. .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
  3756. };