/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "profiles.h"
#include "thread.h"
#include "videodsp.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dsp.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"

#define VP9_SYNCCODE 0x498342

struct VP9Filter {
    uint8_t level[8 * 8];
    uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */]
                              [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */];
};
typedef struct VP9Block {
    uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
    enum FilterMode filter;
    VP56mv mv[4 /* b_idx */][2 /* ref */];
    enum BlockSize bs;
    enum TxfmMode tx, uvtx;
    enum BlockLevel bl;
    enum BlockPartition bp;
} VP9Block;

typedef struct VP9Context {
    VP9SharedContext s;

    VP9DSPContext dsp;
    VideoDSPContext vdsp;
    GetBitContext gb;
    VP56RangeCoder c;
    VP56RangeCoder *c_b;
    unsigned c_b_size;
    VP9Block *b_base, *b;
    int pass;
    int row, row7, col, col7;
    uint8_t *dst[3];
    ptrdiff_t y_stride, uv_stride;

    uint8_t ss_h, ss_v;
    uint8_t last_bpp, bpp, bpp_index, bytesperpixel;
    uint8_t last_keyframe;
    enum AVPixelFormat pix_fmt, last_fmt;
    ThreadFrame next_refs[8];

    struct {
        uint8_t lim_lut[64];
        uint8_t mblim_lut[64];
    } filter_lut;
    unsigned tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    unsigned sb_cols, sb_rows, rows, cols;
    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][3];
    } prob_ctx[4];
    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][11];
    } prob;
    struct {
        unsigned y_mode[4][10];
        unsigned uv_mode[10][10];
        unsigned filter[4][3];
        unsigned mv_mode[7][4];
        unsigned intra[4][2];
        unsigned comp[5][2];
        unsigned single_ref[5][2][2];
        unsigned comp_ref[5][2];
        unsigned tx32p[2][4];
        unsigned tx16p[2][3];
        unsigned tx8p[2][2];
        unsigned skip[3][2];
        unsigned mv_joint[4];
        struct {
            unsigned sign[2];
            unsigned classes[11];
            unsigned class0[2];
            unsigned bits[10][2];
            unsigned class0_fp[2][4];
            unsigned fp[4];
            unsigned class0_hp[2];
            unsigned hp[2];
        } mv_comp[2];
        unsigned partition[4][4][4];
        unsigned coef[4][2][2][6][6][3];
        unsigned eob[4][2][2][6][6][2];
    } counts;

    // contextual (left/above) cache
    DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
    DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
    DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
    DECLARE_ALIGNED(16, uint8_t, left_uv_nnz_ctx)[2][16];
    DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_txfm_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_segpred_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_intra_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_comp_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_ref_ctx)[8];
    DECLARE_ALIGNED(8, uint8_t, left_filter_ctx)[8];
    uint8_t *above_partition_ctx;
    uint8_t *above_mode_ctx;
    // FIXME maybe merge some of the below in a flags field?
    uint8_t *above_y_nnz_ctx;
    uint8_t *above_uv_nnz_ctx[2];
    uint8_t *above_skip_ctx; // 1bit
    uint8_t *above_txfm_ctx; // 2bit
    uint8_t *above_segpred_ctx; // 1bit
    uint8_t *above_intra_ctx; // 1bit
    uint8_t *above_comp_ctx; // 1bit
    uint8_t *above_ref_ctx; // 2bit
    uint8_t *above_filter_ctx;
    VP56mv (*above_mv_ctx)[2];

    // whole-frame cache
    uint8_t *intra_pred_data[3];
    struct VP9Filter *lflvl;
    DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[135 * 144 * 2];

    // block reconstruction intermediates
    int block_alloc_using_2pass;
    int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
    uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
    struct { int x, y; } min_mv, max_mv;
    DECLARE_ALIGNED(32, uint8_t, tmp_y)[64 * 64 * 2];
    DECLARE_ALIGNED(32, uint8_t, tmp_uv)[2][64 * 64 * 2];
    uint16_t mvscale[3][2];
    uint8_t mvstep[3][2];
} VP9Context;

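// Block dimensions per enum BlockSize: row [0] is in 4x4-block units, row [1]
// in 8x8-block units (so BS_64x64 maps to { 16, 16 } and { 8, 8 } respectively).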
static const uint8_t bwh_tab[2][N_BS_SIZES][2] = {
    {
        { 16, 16 }, { 16, 8 }, { 8, 16 }, { 8, 8 }, { 8, 4 }, { 4, 8 },
        { 4, 4 }, { 4, 2 }, { 2, 4 }, { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 },
    }, {
        { 8, 8 }, { 8, 4 }, { 4, 8 }, { 4, 4 }, { 4, 2 }, { 2, 4 },
        { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
    }
};

static void vp9_unref_frame(AVCodecContext *ctx, VP9Frame *f)
{
    ff_thread_release_buffer(ctx, &f->tf);
    av_buffer_unref(&f->extradata);
    av_buffer_unref(&f->hwaccel_priv_buf);
    f->segmentation_map = NULL;
    f->hwaccel_picture_private = NULL;
}

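// Per-frame side data: sz = 64 * sb_cols * sb_rows is the number of 8x8 blocks
// in the frame; extradata packs one segmentation-map byte plus one
// struct VP9mvrefPair per 8x8 block into a single buffer.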
static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)
{
    VP9Context *s = ctx->priv_data;
    int ret, sz;

    if ((ret = ff_thread_get_buffer(ctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    sz = 64 * s->sb_cols * s->sb_rows;
    if (!(f->extradata = av_buffer_allocz(sz * (1 + sizeof(struct VP9mvrefPair))))) {
        goto fail;
    }
    f->segmentation_map = f->extradata->data;
    f->mv = (struct VP9mvrefPair *) (f->extradata->data + sz);

    if (ctx->hwaccel) {
        const AVHWAccel *hwaccel = ctx->hwaccel;
        av_assert0(!f->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }

    return 0;

fail:
    vp9_unref_frame(ctx, f);
    return AVERROR(ENOMEM);
}

static int vp9_ref_frame(AVCodecContext *ctx, VP9Frame *dst, VP9Frame *src)
{
    int res;

    if ((res = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0) {
        return res;
    } else if (!(dst->extradata = av_buffer_ref(src->extradata))) {
        goto fail;
    }
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    return 0;

fail:
    vp9_unref_frame(ctx, dst);
    return AVERROR(ENOMEM);
}

static int update_size(AVCodecContext *ctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + CONFIG_VP9_D3D11VA_HWACCEL + CONFIG_VP9_VAAPI_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = ctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, res;

    av_assert0(w > 0 && h > 0);

    if (s->intra_pred_data[0] && w == ctx->width && h == ctx->height && s->pix_fmt == s->last_fmt)
        return 0;

    if ((res = ff_set_dimensions(ctx, w, h)) < 0)
        return res;

    if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
#if CONFIG_VP9_DXVA2_HWACCEL
        *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
        *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
        *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
    }
    *fmtp++ = s->pix_fmt;
    *fmtp = AV_PIX_FMT_NONE;

    res = ff_thread_get_format(ctx, pix_fmts);
    if (res < 0)
        return res;

    ctx->pix_fmt = res;
    s->last_fmt  = s->pix_fmt;
    s->sb_cols   = (w + 63) >> 6;
    s->sb_rows   = (h + 63) >> 6;
    s->cols      = (w + 7) >> 3;
    s->rows      = (h + 7) >> 3;

#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
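    // All the per-column context arrays below are carved out of one
    // allocation; assign() hands out a slice of s->sb_cols * n elements
    // and advances p past it.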
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                                sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0],  uint8_t *,             64 * bytesperpixel);
    assign(s->intra_pred_data[1],  uint8_t *,             64 * bytesperpixel);
    assign(s->intra_pred_data[2],  uint8_t *,             64 * bytesperpixel);
    assign(s->above_y_nnz_ctx,     uint8_t *,             16);
    assign(s->above_mode_ctx,      uint8_t *,             16);
    assign(s->above_mv_ctx,        VP56mv(*)[2],          16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *,             16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *,             16);
    assign(s->above_partition_ctx, uint8_t *,              8);
    assign(s->above_skip_ctx,      uint8_t *,              8);
    assign(s->above_txfm_ctx,      uint8_t *,              8);
    assign(s->above_segpred_ctx,   uint8_t *,              8);
    assign(s->above_intra_ctx,     uint8_t *,              8);
    assign(s->above_comp_ctx,      uint8_t *,              8);
    assign(s->above_ref_ctx,       uint8_t *,              8);
    assign(s->above_filter_ctx,    uint8_t *,              8);
    assign(s->lflvl,               struct VP9Filter *,     1);
#undef assign

    // these will be re-allocated a little later
    av_freep(&s->b_base);
    av_freep(&s->block_base);

    if (s->bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->bpp, ctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->bpp);
        s->last_bpp = s->bpp;
    }

    return 0;
}

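// In 2-pass (frame-threaded) decoding every block's coefficients must survive
// until the second pass, so the scratch buffers are sized per superblock;
// in 1-pass mode a single block's worth of storage suffices.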
static int update_block_buffers(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;

    if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    av_free(s->b_base);
    av_free(s->block_base);
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs   = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        s->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        s->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                    16 * 16 + 2 * chroma_eobs) * sbs);
        if (!s->b_base || !s->block_base)
            return AVERROR(ENOMEM);
        s->uvblock_base[0] = s->block_base + sbs * 64 * 64 * bytesperpixel;
        s->uvblock_base[1] = s->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        s->eob_base = (uint8_t *) (s->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        s->uveob_base[0] = s->eob_base + 16 * 16 * sbs;
        s->uveob_base[1] = s->uveob_base[0] + chroma_eobs * sbs;
    } else {
        s->b_base = av_malloc(sizeof(VP9Block));
        s->block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                   16 * 16 + 2 * chroma_eobs);
        if (!s->b_base || !s->block_base)
            return AVERROR(ENOMEM);
        s->uvblock_base[0] = s->block_base + 64 * 64 * bytesperpixel;
        s->uvblock_base[1] = s->uvblock_base[0] + chroma_blocks * bytesperpixel;
        s->eob_base = (uint8_t *) (s->uvblock_base[1] + chroma_blocks * bytesperpixel);
        s->uveob_base[0] = s->eob_base + 16 * 16;
        s->uveob_base[1] = s->uveob_base[0] + chroma_eobs;
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}

// for some reason the sign bit is at the end, not the start, of a bit sequence
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}

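// Inverse recentering: small codes land near the reference m, alternating
// below/above it (with m = 10: v = 0,1,2,3,4 -> 10, 9, 11, 8, 12), while
// v > 2 * m is passed through unchanged.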
static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    return v > 2 * m ? v : v & 1 ? m - ((v + 1) >> 1) : m + (v >> 1);
}

// differential forward probability updates
static int update_prob(VP56RangeCoder *c, int p)
{
    static const int inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */
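    /* The decoded delta d falls into one of four VLC ranges: 4 bits -> [0, 16),
     * 4 bits + 16 -> [16, 32), 5 bits + 32 -> [32, 64), and a final escape
     * (7 bits, with one doubling step once d >= 65) covering [64, 255). */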
    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}

static int read_colorspace_details(AVCodecContext *ctx)
{
    static const enum AVColorSpace colorspaces[8] = {
        AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
        AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
    };
    VP9Context *s = ctx->priv_data;
    int bits = ctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->bpp) >> 3;
    ctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (ctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
            AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12
        };
        s->ss_h = s->ss_v = 0;
        ctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (ctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(ctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            av_log(ctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   ctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
            { { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
              { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
            { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
              { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
            { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
              { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
        };
        ctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
        if (ctx->profile & 1) {
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(ctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       ctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(ctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       ctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}

static int decode_frame_header(AVCodecContext *ctx,
                               const uint8_t *data, int size, int *ref)
{
    VP9Context *s = ctx->priv_data;
    int c, i, j, k, l, m, n, w, h, max, size2, res, sharp;
    int last_invisible;
    const uint8_t *data2;

    /* general header */
    if ((res = init_get_bits8(&s->gb, data, size)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
        return res;
    }
    if (get_bits(&s->gb, 2) != 0x2) { // frame marker
        av_log(ctx, AV_LOG_ERROR, "Invalid frame marker\n");
        return AVERROR_INVALIDDATA;
    }
    ctx->profile  = get_bits1(&s->gb);
    ctx->profile |= get_bits1(&s->gb) << 1;
    if (ctx->profile == 3) ctx->profile += get_bits1(&s->gb);
    if (ctx->profile > 3) {
        av_log(ctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", ctx->profile);
        return AVERROR_INVALIDDATA;
    }
    s->s.h.profile = ctx->profile;
    if (get_bits1(&s->gb)) {
        *ref = get_bits(&s->gb, 3);
        return 0;
    }
    s->last_keyframe = s->s.h.keyframe;
    s->s.h.keyframe  = !get_bits1(&s->gb);
    last_invisible   = s->s.h.invisible;
    s->s.h.invisible = !get_bits1(&s->gb);
    s->s.h.errorres  = get_bits1(&s->gb);
    s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
    if (s->s.h.keyframe) {
        if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
            av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
            return AVERROR_INVALIDDATA;
        }
        if ((res = read_colorspace_details(ctx)) < 0)
            return res;
        // for profile 1, here follows the subsampling bits
        s->s.h.refreshrefmask = 0xff;
        w = get_bits(&s->gb, 16) + 1;
        h = get_bits(&s->gb, 16) + 1;
        if (get_bits1(&s->gb)) // display size
            skip_bits(&s->gb, 32);
    } else {
        s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
        s->s.h.resetctx  = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
        if (s->s.h.intraonly) {
            if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
                av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
                return AVERROR_INVALIDDATA;
            }
            if (ctx->profile >= 1) {
                if ((res = read_colorspace_details(ctx)) < 0)
                    return res;
            } else {
                s->ss_h = s->ss_v = 1;
                s->bpp = 8;
                s->bpp_index = 0;
                s->bytesperpixel = 1;
                s->pix_fmt = AV_PIX_FMT_YUV420P;
                ctx->colorspace = AVCOL_SPC_BT470BG;
                ctx->color_range = AVCOL_RANGE_JPEG;
            }
            s->s.h.refreshrefmask = get_bits(&s->gb, 8);
            w = get_bits(&s->gb, 16) + 1;
            h = get_bits(&s->gb, 16) + 1;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
        } else {
            s->s.h.refreshrefmask = get_bits(&s->gb, 8);
            s->s.h.refidx[0]      = get_bits(&s->gb, 3);
            s->s.h.signbias[0]    = get_bits1(&s->gb) && !s->s.h.errorres;
            s->s.h.refidx[1]      = get_bits(&s->gb, 3);
            s->s.h.signbias[1]    = get_bits1(&s->gb) && !s->s.h.errorres;
            s->s.h.refidx[2]      = get_bits(&s->gb, 3);
            s->s.h.signbias[2]    = get_bits1(&s->gb) && !s->s.h.errorres;
            if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
                !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
                !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
                av_log(ctx, AV_LOG_ERROR, "Not all references are available\n");
                return AVERROR_INVALIDDATA;
            }
            if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[0]].f->width;
                h = s->s.refs[s->s.h.refidx[0]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[1]].f->width;
                h = s->s.refs[s->s.h.refidx[1]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[2]].f->width;
                h = s->s.refs[s->s.h.refidx[2]].f->height;
            } else {
                w = get_bits(&s->gb, 16) + 1;
                h = get_bits(&s->gb, 16) + 1;
            }
            // Note that in this code, "CUR_FRAME" is actually before we
            // have formally allocated a frame, and thus actually represents
            // the _last_ frame
            s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
                                         s->s.frames[CUR_FRAME].tf.f->height == h;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
            s->s.h.highprecisionmvs = get_bits1(&s->gb);
            s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
                                                    get_bits(&s->gb, 2);
            s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
                                    s->s.h.signbias[0] != s->s.h.signbias[2];
            if (s->s.h.allowcompinter) {
                if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
                    s->s.h.fixcompref    = 2;
                    s->s.h.varcompref[0] = 0;
                    s->s.h.varcompref[1] = 1;
                } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
                    s->s.h.fixcompref    = 1;
                    s->s.h.varcompref[0] = 0;
                    s->s.h.varcompref[1] = 2;
                } else {
                    s->s.h.fixcompref    = 0;
                    s->s.h.varcompref[0] = 1;
                    s->s.h.varcompref[1] = 2;
                }
            }
        }
    }

    s->s.h.refreshctx   = s->s.h.errorres ? 0 : get_bits1(&s->gb);
    s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
    s->s.h.framectxid   = c = get_bits(&s->gb, 2);

    /* loopfilter header data */
    if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
        // reset loopfilter defaults
        s->s.h.lf_delta.ref[0] = 1;
        s->s.h.lf_delta.ref[1] = 0;
        s->s.h.lf_delta.ref[2] = -1;
        s->s.h.lf_delta.ref[3] = -1;
        s->s.h.lf_delta.mode[0] = 0;
        s->s.h.lf_delta.mode[1] = 0;
        memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
    }
    s->s.h.filter.level = get_bits(&s->gb, 6);
    sharp = get_bits(&s->gb, 3);
    // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
    // the old cache values since they are still valid
    if (s->s.h.filter.sharpness != sharp)
        memset(s->filter_lut.lim_lut, 0, sizeof(s->filter_lut.lim_lut));
    s->s.h.filter.sharpness = sharp;
    if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
        if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
            for (i = 0; i < 4; i++)
                if (get_bits1(&s->gb))
                    s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
            for (i = 0; i < 2; i++)
                if (get_bits1(&s->gb))
                    s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
        }
    }

    /* quantization header data */
    s->s.h.yac_qi      = get_bits(&s->gb, 8);
    s->s.h.ydc_qdelta  = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.lossless    = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
                         s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
    if (s->s.h.lossless)
        ctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;

    /* segmentation header info */
    if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
        if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
            for (i = 0; i < 7; i++)
                s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
                                              get_bits(&s->gb, 8) : 255;
            if ((s->s.h.segmentation.temporal = get_bits1(&s->gb))) {
                for (i = 0; i < 3; i++)
                    s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
                                                       get_bits(&s->gb, 8) : 255;
            }
        }
        if (get_bits1(&s->gb)) {
            s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
            for (i = 0; i < 8; i++) {
                if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
                if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
                if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
                s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
            }
        }
    }

    // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
    for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
        int qyac, qydc, quvac, quvdc, lflvl, sh;

        if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
            if (s->s.h.segmentation.absolute_vals)
                qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
            else
                qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
        } else {
            qyac = s->s.h.yac_qi;
        }
        qydc  = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
        quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
        quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
        qyac  = av_clip_uintp2(qyac, 8);

        s->s.h.segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[s->bpp_index][qydc];
        s->s.h.segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[s->bpp_index][qyac];
        s->s.h.segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[s->bpp_index][quvdc];
        s->s.h.segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[s->bpp_index][quvac];

        sh = s->s.h.filter.level >= 32;
        if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
            if (s->s.h.segmentation.absolute_vals)
                lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
            else
                lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
        } else {
            lflvl = s->s.h.filter.level;
        }
        if (s->s.h.lf_delta.enabled) {
            s->s.h.segmentation.feat[i].lflvl[0][0] =
            s->s.h.segmentation.feat[i].lflvl[0][1] =
                av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] << sh), 6);
            for (j = 1; j < 4; j++) {
                s->s.h.segmentation.feat[i].lflvl[j][0] =
                    av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
                                             s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
                s->s.h.segmentation.feat[i].lflvl[j][1] =
                    av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
                                             s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
            }
        } else {
            memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
                   sizeof(s->s.h.segmentation.feat[i].lflvl));
        }
    }

    /* tiling info */
    if ((res = update_size(ctx, w, h)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
               w, h, s->pix_fmt);
        return res;
    }
    for (s->s.h.tiling.log2_tile_cols = 0;
         s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
         s->s.h.tiling.log2_tile_cols++) ;
    for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
    max = FFMAX(0, max - 1);
    while (max > s->s.h.tiling.log2_tile_cols) {
        if (get_bits1(&s->gb))
            s->s.h.tiling.log2_tile_cols++;
        else
            break;
    }
    s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
    s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
    if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
        s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
        s->c_b = av_fast_realloc(s->c_b, &s->c_b_size,
                                 sizeof(VP56RangeCoder) * s->s.h.tiling.tile_cols);
        if (!s->c_b) {
            av_log(ctx, AV_LOG_ERROR, "Ran out of memory during range coder init\n");
            return AVERROR(ENOMEM);
        }
    }

    /* check reference frames */
    if (!s->s.h.keyframe && !s->s.h.intraonly) {
        for (i = 0; i < 3; i++) {
            AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
            int refw = ref->width, refh = ref->height;

            if (ref->format != ctx->pix_fmt) {
                av_log(ctx, AV_LOG_ERROR,
                       "Ref pixfmt (%s) did not match current frame (%s)",
                       av_get_pix_fmt_name(ref->format),
                       av_get_pix_fmt_name(ctx->pix_fmt));
                return AVERROR_INVALIDDATA;
            } else if (refw == w && refh == h) {
                s->mvscale[i][0] = s->mvscale[i][1] = 0;
            } else {
                if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
                           refw, refh, w, h);
                    return AVERROR_INVALIDDATA;
                }
                s->mvscale[i][0] = (refw << 14) / w;
                s->mvscale[i][1] = (refh << 14) / h;
                s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
                s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
            }
        }
    }

    if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
        s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
                           s->prob_ctx[3].p = vp9_default_probs;
        memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[1].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[2].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
    } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
        s->prob_ctx[c].p = vp9_default_probs;
        memcpy(s->prob_ctx[c].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
    }

    // next 16 bits is size of the rest of the header (arith-coded)
    s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
    s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;

    data2 = align_get_bits(&s->gb);
    if (size2 > size - (data2 - data)) {
        av_log(ctx, AV_LOG_ERROR, "Invalid compressed header size\n");
        return AVERROR_INVALIDDATA;
    }
    ff_vp56_init_range_decoder(&s->c, data2, size2);
    if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
        av_log(ctx, AV_LOG_ERROR, "Marker bit was set\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->s.h.keyframe || s->s.h.intraonly) {
        memset(s->counts.coef, 0, sizeof(s->counts.coef));
        memset(s->counts.eob, 0, sizeof(s->counts.eob));
    } else {
        memset(&s->counts, 0, sizeof(s->counts));
    }
    // FIXME is it faster to not copy here, but do it down in the fw updates
    // as explicit copies if the fw update is missing (and skip the copy upon
    // fw update)?
    s->prob.p = s->prob_ctx[c].p;

    // txfm updates
    if (s->s.h.lossless) {
        s->s.h.txfmmode = TX_4X4;
    } else {
        s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
        if (s->s.h.txfmmode == 3)
            s->s.h.txfmmode += vp8_rac_get(&s->c);

        if (s->s.h.txfmmode == TX_SWITCHABLE) {
            for (i = 0; i < 2; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx16p[i][j] =
                            update_prob(&s->c, s->prob.p.tx16p[i][j]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 3; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx32p[i][j] =
                            update_prob(&s->c, s->prob.p.tx32p[i][j]);
        }
    }

    // coef updates
    for (i = 0; i < 4; i++) {
        uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
        if (vp8_rac_get(&s->c)) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m >= 3 && l == 0) // dc only has 3 pt
                                break;
                            for (n = 0; n < 3; n++) {
                                if (vp56_rac_get_prob_branchy(&s->c, 252)) {
                                    p[n] = update_prob(&s->c, r[n]);
                                } else {
                                    p[n] = r[n];
                                }
                            }
                            p[3] = 0;
                        }
        } else {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m > 3 && l == 0) // dc only has 3 pt
                                break;
                            memcpy(p, r, 3);
                            p[3] = 0;
                        }
        }
        if (s->s.h.txfmmode == i)
            break;
    }

    // mode updates
    for (i = 0; i < 3; i++)
        if (vp56_rac_get_prob_branchy(&s->c, 252))
            s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
    if (!s->s.h.keyframe && !s->s.h.intraonly) {
        for (i = 0; i < 7; i++)
            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_mode[i][j] =
                        update_prob(&s->c, s->prob.p.mv_mode[i][j]);

        if (s->s.h.filtermode == FILTER_SWITCHABLE)
            for (i = 0; i < 4; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.filter[i][j] =
                            update_prob(&s->c, s->prob.p.filter[i][j]);

        for (i = 0; i < 4; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);

        if (s->s.h.allowcompinter) {
            s->s.h.comppredmode = vp8_rac_get(&s->c);
            if (s->s.h.comppredmode)
                s->s.h.comppredmode += vp8_rac_get(&s->c);
            if (s->s.h.comppredmode == PRED_SWITCHABLE)
                for (i = 0; i < 5; i++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.comp[i] =
                            update_prob(&s->c, s->prob.p.comp[i]);
        } else {
            s->s.h.comppredmode = PRED_SINGLEREF;
        }

        if (s->s.h.comppredmode != PRED_COMPREF) {
            for (i = 0; i < 5; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][0] =
                        update_prob(&s->c, s->prob.p.single_ref[i][0]);
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][1] =
                        update_prob(&s->c, s->prob.p.single_ref[i][1]);
            }
        }

        if (s->s.h.comppredmode != PRED_SINGLEREF) {
            for (i = 0; i < 5; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.comp_ref[i] =
                        update_prob(&s->c, s->prob.p.comp_ref[i]);
        }

        for (i = 0; i < 4; i++)
            for (j = 0; j < 9; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.y_mode[i][j] =
                        update_prob(&s->c, s->prob.p.y_mode[i][j]);

        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.partition[3 - i][j][k] =
                            update_prob(&s->c, s->prob.p.partition[3 - i][j][k]);

        // mv fields don't use the update_prob subexp model for some reason
        for (i = 0; i < 3; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

        for (i = 0; i < 2; i++) {
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].sign = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].classes[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].class0 = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].bits[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.mv_comp[i].class0_fp[j][k] =
                            (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].fp[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        if (s->s.h.highprecisionmvs) {
            for (i = 0; i < 2; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].class0_hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
            }
        }
    }

    return (data2 - data) + size2;
}

static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
                                      VP9Context *s)
{
    dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
    dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
}

static void find_ref_mvs(VP9Context *s,
                         VP56mv *pmv, int ref, int z, int idx, int sb)
{
    static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
        [BS_64x64] = {{  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
                      { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 }},
        [BS_64x32] = {{  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 }},
        [BS_32x64] = {{ -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
                      { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 }},
        [BS_32x32] = {{  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_32x16] = {{  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
                      { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x32] = {{ -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
                      {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 }},
        [BS_16x16] = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x8]  = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
                      {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 }},
        [BS_8x16]  = {{ -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
                      { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 }},
        [BS_8x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_8x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
    };
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
#define INVALID_MV 0x80008000U
    uint32_t mem = INVALID_MV, mem_sub8x8 = INVALID_MV;
    int i;

#define RETURN_DIRECT_MV(mv) \
    do { \
        uint32_t m = AV_RN32A(&mv); \
        if (!idx) { \
            AV_WN32A(pmv, m); \
            return; \
        } else if (mem == INVALID_MV) { \
            mem = m; \
        } else if (m != mem) { \
            AV_WN32A(pmv, m); \
            return; \
        } \
    } while (0)

    if (sb >= 0) {
        if (sb == 2 || sb == 1) {
            RETURN_DIRECT_MV(b->mv[0][z]);
        } else if (sb == 3) {
            RETURN_DIRECT_MV(b->mv[2][z]);
            RETURN_DIRECT_MV(b->mv[1][z]);
            RETURN_DIRECT_MV(b->mv[0][z]);
        }

#define RETURN_MV(mv) \
    do { \
        if (sb > 0) { \
            VP56mv tmp; \
            uint32_t m; \
            av_assert2(idx == 1); \
            av_assert2(mem != INVALID_MV); \
            if (mem_sub8x8 == INVALID_MV) { \
                clamp_mv(&tmp, &mv, s); \
                m = AV_RN32A(&tmp); \
                if (m != mem) { \
                    AV_WN32A(pmv, m); \
                    return; \
                } \
                mem_sub8x8 = AV_RN32A(&mv); \
            } else if (mem_sub8x8 != AV_RN32A(&mv)) { \
                clamp_mv(&tmp, &mv, s); \
                m = AV_RN32A(&tmp); \
                if (m != mem) { \
                    AV_WN32A(pmv, m); \
                } else { \
                    /* BUG I'm pretty sure this isn't the intention */ \
                    AV_WN32A(pmv, 0); \
                } \
                return; \
            } \
        } else { \
            uint32_t m = AV_RN32A(&mv); \
            if (!idx) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } else if (mem == INVALID_MV) { \
                mem = m; \
            } else if (m != mem) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } \
        } \
    } while (0)

        if (row > 0) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
            }
        }
        if (col > s->tile_col_start) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
            if (mv->ref[0] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
            }
        }
        i = 2;
    } else {
        i = 0;
    }

    // previously coded MVs in this neighbourhood, using same reference frame
    for (; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] == ref) {
                RETURN_MV(mv->mv[0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(mv->mv[1]);
            }
        }
    }

    // MV at this position in previous frame, using same reference frame
    if (s->s.h.use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass)
            ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
        if (mv->ref[0] == ref) {
            RETURN_MV(mv->mv[0]);
        } else if (mv->ref[1] == ref) {
            RETURN_MV(mv->mv[1]);
        }
    }

#define RETURN_SCALE_MV(mv, scale) \
    do { \
        if (scale) { \
            VP56mv mv_temp = { -mv.x, -mv.y }; \
            RETURN_MV(mv_temp); \
        } else { \
            RETURN_MV(mv); \
        } \
    } while (0)

    // previously coded MVs in this neighbourhood, using different reference frame
    for (i = 0; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] != ref && mv->ref[0] >= 0) {
                RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
            }
            if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                // BUG - libvpx has this condition regardless of whether
                // we used the first ref MV and pre-scaling
                AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
                RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
            }
        }
    }

    // MV at this position in previous frame, using different reference frame
    if (s->s.h.use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        // no need to await_progress, because we already did that above
        if (mv->ref[0] != ref && mv->ref[0] >= 0) {
            RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
        }
        if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
            // BUG - libvpx has this condition regardless of whether
            // we used the first ref MV and pre-scaling
            AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
            RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
        }
    }

    AV_ZERO32(pmv);
    clamp_mv(pmv, pmv, s);
#undef INVALID_MV
#undef RETURN_MV
#undef RETURN_SCALE_MV
}

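// One MV component is coded as a sign bit, a magnitude class, and per-class
// refinement: integer magnitude bits, a 2-bit fractional part in 1/4-pel
// steps, and an optional 1/8-pel bit coded only for high-precision MVs.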
static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
{
    int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
    int n, c = vp8_rac_get_tree(&s->c, vp9_mv_class_tree,
                                s->prob.p.mv_comp[idx].classes);

    s->counts.mv_comp[idx].sign[sign]++;
    s->counts.mv_comp[idx].classes[c]++;
    if (c) {
        int m;

        for (n = 0, m = 0; m < c; m++) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
            n |= bit << m;
            s->counts.mv_comp[idx].bits[m][bit]++;
        }
        n <<= 3;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree, s->prob.p.mv_comp[idx].fp);
        n |= bit << 1;
        s->counts.mv_comp[idx].fp[bit]++;
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
            s->counts.mv_comp[idx].hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].hp[1]++;
        }
        n += 8 << c;
    } else {
        n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
        s->counts.mv_comp[idx].class0[n]++;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree,
                               s->prob.p.mv_comp[idx].class0_fp[n]);
        s->counts.mv_comp[idx].class0_fp[n][bit]++;
        n = (n << 3) | (bit << 1);
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
            s->counts.mv_comp[idx].class0_hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].class0_hp[1]++;
        }
    }

    return sign ? -(n + 1) : (n + 1);
}

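// Derive the block's motion vector(s): ZEROMV zeroes both references in one
// store; otherwise find_ref_mvs() supplies the prediction (NEARESTMV/NEARMV),
// which is rounded toward zero to an even value when high precision is
// unavailable, and NEWMV additionally adds a coded residual on top.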
static void fill_mv(VP9Context *s,
                    VP56mv *mv, int mode, int sb)
{
    VP9Block *b = s->b;

    if (mode == ZEROMV) {
        AV_ZERO64(mv);
    } else {
        int hp;

        // FIXME cache this value and reuse for other subblocks
        find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
                     mode == NEWMV ? -1 : sb);
        // FIXME maybe move this code into find_ref_mvs()
        if ((mode == NEWMV || sb == -1) &&
            !(hp = s->s.h.highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
            if (mv[0].y & 1) {
                if (mv[0].y < 0)
                    mv[0].y++;
                else
                    mv[0].y--;
            }
            if (mv[0].x & 1) {
                if (mv[0].x < 0)
                    mv[0].x++;
                else
                    mv[0].x--;
            }
        }
        if (mode == NEWMV) {
            enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                              s->prob.p.mv_joint);

            s->counts.mv_joint[j]++;
            if (j >= MV_JOINT_V)
                mv[0].y += read_mv_component(s, 0, hp);
            if (j & 1)
                mv[0].x += read_mv_component(s, 1, hp);
        }

        if (b->comp) {
            // FIXME cache this value and reuse for other subblocks
            find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
                         mode == NEWMV ? -1 : sb);
            if ((mode == NEWMV || sb == -1) &&
                !(hp = s->s.h.highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
                if (mv[1].y & 1) {
                    if (mv[1].y < 0)
                        mv[1].y++;
                    else
                        mv[1].y--;
                }
                if (mv[1].x & 1) {
                    if (mv[1].x < 0)
                        mv[1].x++;
                    else
                        mv[1].x--;
                }
            }
            if (mode == NEWMV) {
                enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                                  s->prob.p.mv_joint);

                s->counts.mv_joint[j]++;
                if (j >= MV_JOINT_V)
                    mv[1].y += read_mv_component(s, 0, hp);
                if (j & 1)
                    mv[1].x += read_mv_component(s, 1, hp);
            }
        }
    }
}

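// Fill a w-byte-wide, h-row-high rectangle of context values with the byte v,
// using the widest aligned store available for each supported width.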
static av_always_inline void setctx_2d(uint8_t *ptr, int w, int h,
                                       ptrdiff_t stride, int v)
{
    switch (w) {
    case 1:
        do {
            *ptr = v;
            ptr += stride;
        } while (--h);
        break;
    case 2: {
        int v16 = v * 0x0101;
        do {
            AV_WN16A(ptr, v16);
            ptr += stride;
        } while (--h);
        break;
    }
    case 4: {
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr, v32);
            ptr += stride;
        } while (--h);
        break;
    }
    case 8: {
#if HAVE_FAST_64BIT
        uint64_t v64 = v * 0x0101010101010101ULL;
        do {
            AV_WN64A(ptr, v64);
            ptr += stride;
        } while (--h);
#else
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr, v32);
            AV_WN32A(ptr + 4, v32);
            ptr += stride;
        } while (--h);
#endif
        break;
    }
    }
}

static void decode_mode(AVCodecContext *ctx)
{
    static const uint8_t left_ctx[N_BS_SIZES] = {
        0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
    };
    static const uint8_t above_ctx[N_BS_SIZES] = {
        0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
    };
    static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = {
        TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
        TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
    };
    VP9Context *s = ctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
    int bw4 = bwh_tab[1][b->bs][0], w4 = FFMIN(s->cols - col, bw4);
    int bh4 = bwh_tab[1][b->bs][1], h4 = FFMIN(s->rows - row, bh4), y;
    int have_a = row > 0, have_l = col > s->tile_col_start;
    int vref, filter_id;

    if (!s->s.h.segmentation.enabled) {
        b->seg_id = 0;
    } else if (s->s.h.keyframe || s->s.h.intraonly) {
        b->seg_id = !s->s.h.segmentation.update_map ? 0 :
                    vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->s.h.segmentation.prob);
    } else if (!s->s.h.segmentation.update_map ||
               (s->s.h.segmentation.temporal &&
                vp56_rac_get_prob_branchy(&s->c,
                    s->s.h.segmentation.pred_prob[s->above_segpred_ctx[col] +
                                                  s->left_segpred_ctx[row7]]))) {
        if (!s->s.h.errorres && s->s.frames[REF_FRAME_SEGMAP].segmentation_map) {
            int pred = 8, x;
            uint8_t *refsegmap = s->s.frames[REF_FRAME_SEGMAP].segmentation_map;

            if (!s->s.frames[REF_FRAME_SEGMAP].uses_2pass)
                ff_thread_await_progress(&s->s.frames[REF_FRAME_SEGMAP].tf, row >> 3, 0);
            for (y = 0; y < h4; y++) {
                int idx_base = (y + row) * 8 * s->sb_cols + col;
                for (x = 0; x < w4; x++)
                    pred = FFMIN(pred, refsegmap[idx_base + x]);
            }
            av_assert1(pred < 8);
            b->seg_id = pred;
        } else {
            b->seg_id = 0;
        }

        memset(&s->above_segpred_ctx[col], 1, w4);
        memset(&s->left_segpred_ctx[row7], 1, h4);
    } else {
        b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree,
                                     s->s.h.segmentation.prob);

        memset(&s->above_segpred_ctx[col], 0, w4);
        memset(&s->left_segpred_ctx[row7], 0, h4);
    }
    if (s->s.h.segmentation.enabled &&
        (s->s.h.segmentation.update_map || s->s.h.keyframe || s->s.h.intraonly)) {
        setctx_2d(&s->s.frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col],
                  bw4, bh4, 8 * s->sb_cols, b->seg_id);
    }

    b->skip = s->s.h.segmentation.enabled &&
              s->s.h.segmentation.feat[b->seg_id].skip_enabled;
    if (!b->skip) {
        int c = s->left_skip_ctx[row7] + s->above_skip_ctx[col];
        b->skip = vp56_rac_get_prob(&s->c, s->prob.p.skip[c]);
        s->counts.skip[c][b->skip]++;
    }

    if (s->s.h.keyframe || s->s.h.intraonly) {
        b->intra = 1;
    } else if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) {
        b->intra = !s->s.h.segmentation.feat[b->seg_id].ref_val;
    } else {
        int c, bit;

        if (have_a && have_l) {
            c = s->above_intra_ctx[col] + s->left_intra_ctx[row7];
            c += (c == 2);
        } else {
            c = have_a ? 2 * s->above_intra_ctx[col] :
                have_l ? 2 * s->left_intra_ctx[row7] : 0;
        }
        bit = vp56_rac_get_prob(&s->c, s->prob.p.intra[c]);
        s->counts.intra[c][bit]++;
        b->intra = !bit;
    }

    if ((b->intra || !b->skip) && s->s.h.txfmmode == TX_SWITCHABLE) {
        int c;

        if (have_a) {
            if (have_l) {
                c = (s->above_skip_ctx[col] ? max_tx :
                     s->above_txfm_ctx[col]) +
                    (s->left_skip_ctx[row7] ? max_tx :
                     s->left_txfm_ctx[row7]) > max_tx;
            } else {
                c = s->above_skip_ctx[col] ? 1 :
                    (s->above_txfm_ctx[col] * 2 > max_tx);
            }
        } else if (have_l) {
            c = s->left_skip_ctx[row7] ? 1 :
                (s->left_txfm_ctx[row7] * 2 > max_tx);
        } else {
            c = 1;
        }
        switch (max_tx) {
        case TX_32X32:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][0]);
            if (b->tx) {
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][1]);
                if (b->tx == 2)
                    b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][2]);
            }
            s->counts.tx32p[c][b->tx]++;
            break;
        case TX_16X16:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][0]);
            if (b->tx)
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][1]);
            s->counts.tx16p[c][b->tx]++;
            break;
        case TX_8X8:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx8p[c]);
            s->counts.tx8p[c][b->tx]++;
            break;
        case TX_4X4:
            b->tx = TX_4X4;
            break;
        }
    } else {
        b->tx = FFMIN(max_tx, s->s.h.txfmmode);
    }

    if (s->s.h.keyframe || s->s.h.intraonly) {
        uint8_t *a = &s->above_mode_ctx[col * 2];
        uint8_t *l = &s->left_mode_ctx[(row7) << 1];

        b->comp = 0;
        if (b->bs > BS_8x8) {
            // FIXME the memory storage intermediates here aren't really
            // necessary, they're just there to make the code slightly
            // simpler for now
            b->mode[0] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                 vp9_default_kf_ymode_probs[a[0]][l[0]]);
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
                l[0] = a[1] = b->mode[1];
            } else {
                l[0] = a[1] = b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                     vp9_default_kf_ymode_probs[a[0]][l[1]]);
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                  vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
                    l[1] = a[1] = b->mode[3];
                } else {
                    l[1] = a[1] = b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                l[1] = a[1] = b->mode[3] = b->mode[1];
            }
        } else {
            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          vp9_default_kf_ymode_probs[*a][*l]);
            b->mode[3] = b->mode[2] = b->mode[1] = b->mode[0];
            // FIXME this can probably be optimized
            memset(a, b->mode[0], bwh_tab[0][b->bs][0]);
            memset(l, b->mode[0], bwh_tab[0][b->bs][1]);
        }
        b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                     vp9_default_kf_uvmode_probs[b->mode[3]]);
    } else if (b->intra) {
        b->comp = 0;
        if (b->bs > BS_8x8) {
            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          s->prob.p.y_mode[0]);
            s->counts.y_mode[0][b->mode[0]]++;
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[1]]++;
            } else {
                b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[2]]++;
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                  s->prob.p.y_mode[0]);
                    s->counts.y_mode[0][b->mode[3]]++;
                } else {
                    b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                b->mode[3] = b->mode[1];
            }
        } else {
            static const uint8_t size_group[10] = {
                3, 3, 3, 3, 2, 2, 2, 1, 1, 1
            };
            int sz = size_group[b->bs];

            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          s->prob.p.y_mode[sz]);
            b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
            s->counts.y_mode[sz][b->mode[3]]++;
        }
        b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                     s->prob.p.uv_mode[b->mode[3]]);
        s->counts.uv_mode[b->mode[3]][b->uvmode]++;
    } else {
        static const uint8_t inter_mode_ctx_lut[14][14] = {
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
  1513. { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
  1514. { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
  1515. { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
  1516. };
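// indexed by the above/left mode contexts: entries 0..9 correspond to
// intra modes, 10..13 to the inter modes NEARESTMV..NEWMV as stored in
// *_mode_ctx[]; the result picks one of the 7 inter-mode probability
// contexts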
  1517. if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) {
  1518. av_assert2(s->s.h.segmentation.feat[b->seg_id].ref_val != 0);
  1519. b->comp = 0;
  1520. b->ref[0] = s->s.h.segmentation.feat[b->seg_id].ref_val - 1;
  1521. } else {
  1522. // read comp_pred flag
  1523. if (s->s.h.comppredmode != PRED_SWITCHABLE) {
  1524. b->comp = s->s.h.comppredmode == PRED_COMPREF;
  1525. } else {
  1526. int c;
  1527. // FIXME add intra as ref=0xff (or -1) to make these easier?
  1528. if (have_a) {
  1529. if (have_l) {
  1530. if (s->above_comp_ctx[col] && s->left_comp_ctx[row7]) {
  1531. c = 4;
  1532. } else if (s->above_comp_ctx[col]) {
  1533. c = 2 + (s->left_intra_ctx[row7] ||
  1534. s->left_ref_ctx[row7] == s->s.h.fixcompref);
  1535. } else if (s->left_comp_ctx[row7]) {
  1536. c = 2 + (s->above_intra_ctx[col] ||
  1537. s->above_ref_ctx[col] == s->s.h.fixcompref);
  1538. } else {
  1539. c = (!s->above_intra_ctx[col] &&
  1540. s->above_ref_ctx[col] == s->s.h.fixcompref) ^
  1541. (!s->left_intra_ctx[row7] &&
1542. s->left_ref_ctx[row7] == s->s.h.fixcompref);
  1543. }
  1544. } else {
  1545. c = s->above_comp_ctx[col] ? 3 :
  1546. (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->s.h.fixcompref);
  1547. }
  1548. } else if (have_l) {
  1549. c = s->left_comp_ctx[row7] ? 3 :
  1550. (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->s.h.fixcompref);
  1551. } else {
  1552. c = 1;
  1553. }
  1554. b->comp = vp56_rac_get_prob(&s->c, s->prob.p.comp[c]);
  1555. s->counts.comp[c][b->comp]++;
  1556. }
  1557. // read actual references
  1558. // FIXME probably cache a few variables here to prevent repetitive
  1559. // memory accesses below
  1560. if (b->comp) /* two references */ {
  1561. int fix_idx = s->s.h.signbias[s->s.h.fixcompref], var_idx = !fix_idx, c, bit;
  1562. b->ref[fix_idx] = s->s.h.fixcompref;
  1563. // FIXME can this codeblob be replaced by some sort of LUT?
  1564. if (have_a) {
  1565. if (have_l) {
  1566. if (s->above_intra_ctx[col]) {
  1567. if (s->left_intra_ctx[row7]) {
  1568. c = 2;
  1569. } else {
  1570. c = 1 + 2 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]);
  1571. }
  1572. } else if (s->left_intra_ctx[row7]) {
  1573. c = 1 + 2 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
  1574. } else {
  1575. int refl = s->left_ref_ctx[row7], refa = s->above_ref_ctx[col];
  1576. if (refl == refa && refa == s->s.h.varcompref[1]) {
  1577. c = 0;
  1578. } else if (!s->left_comp_ctx[row7] && !s->above_comp_ctx[col]) {
  1579. if ((refa == s->s.h.fixcompref && refl == s->s.h.varcompref[0]) ||
  1580. (refl == s->s.h.fixcompref && refa == s->s.h.varcompref[0])) {
  1581. c = 4;
  1582. } else {
  1583. c = (refa == refl) ? 3 : 1;
  1584. }
  1585. } else if (!s->left_comp_ctx[row7]) {
  1586. if (refa == s->s.h.varcompref[1] && refl != s->s.h.varcompref[1]) {
  1587. c = 1;
  1588. } else {
  1589. c = (refl == s->s.h.varcompref[1] &&
  1590. refa != s->s.h.varcompref[1]) ? 2 : 4;
  1591. }
  1592. } else if (!s->above_comp_ctx[col]) {
  1593. if (refl == s->s.h.varcompref[1] && refa != s->s.h.varcompref[1]) {
  1594. c = 1;
  1595. } else {
  1596. c = (refa == s->s.h.varcompref[1] &&
  1597. refl != s->s.h.varcompref[1]) ? 2 : 4;
  1598. }
  1599. } else {
  1600. c = (refl == refa) ? 4 : 2;
  1601. }
  1602. }
  1603. } else {
  1604. if (s->above_intra_ctx[col]) {
  1605. c = 2;
  1606. } else if (s->above_comp_ctx[col]) {
  1607. c = 4 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
  1608. } else {
  1609. c = 3 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
  1610. }
  1611. }
  1612. } else if (have_l) {
  1613. if (s->left_intra_ctx[row7]) {
  1614. c = 2;
  1615. } else if (s->left_comp_ctx[row7]) {
  1616. c = 4 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]);
  1617. } else {
  1618. c = 3 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]);
  1619. }
  1620. } else {
  1621. c = 2;
  1622. }
  1623. bit = vp56_rac_get_prob(&s->c, s->prob.p.comp_ref[c]);
  1624. b->ref[var_idx] = s->s.h.varcompref[bit];
  1625. s->counts.comp_ref[c][bit]++;
  1626. } else /* single reference */ {
  1627. int bit, c;
  1628. if (have_a && !s->above_intra_ctx[col]) {
  1629. if (have_l && !s->left_intra_ctx[row7]) {
  1630. if (s->left_comp_ctx[row7]) {
  1631. if (s->above_comp_ctx[col]) {
  1632. c = 1 + (!s->s.h.fixcompref || !s->left_ref_ctx[row7] ||
  1633. !s->above_ref_ctx[col]);
  1634. } else {
  1635. c = (3 * !s->above_ref_ctx[col]) +
  1636. (!s->s.h.fixcompref || !s->left_ref_ctx[row7]);
  1637. }
  1638. } else if (s->above_comp_ctx[col]) {
  1639. c = (3 * !s->left_ref_ctx[row7]) +
  1640. (!s->s.h.fixcompref || !s->above_ref_ctx[col]);
  1641. } else {
  1642. c = 2 * !s->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col];
  1643. }
  1644. } else if (s->above_intra_ctx[col]) {
  1645. c = 2;
  1646. } else if (s->above_comp_ctx[col]) {
  1647. c = 1 + (!s->s.h.fixcompref || !s->above_ref_ctx[col]);
  1648. } else {
  1649. c = 4 * (!s->above_ref_ctx[col]);
  1650. }
  1651. } else if (have_l && !s->left_intra_ctx[row7]) {
1652. // left-intra is already ruled out by the guard above, so there
1653. // is no c = 2 case here
1654. if (s->left_comp_ctx[row7]) {
  1655. c = 1 + (!s->s.h.fixcompref || !s->left_ref_ctx[row7]);
  1656. } else {
  1657. c = 4 * (!s->left_ref_ctx[row7]);
  1658. }
  1659. } else {
  1660. c = 2;
  1661. }
  1662. bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][0]);
  1663. s->counts.single_ref[c][0][bit]++;
  1664. if (!bit) {
  1665. b->ref[0] = 0;
  1666. } else {
  1667. // FIXME can this codeblob be replaced by some sort of LUT?
  1668. if (have_a) {
  1669. if (have_l) {
  1670. if (s->left_intra_ctx[row7]) {
  1671. if (s->above_intra_ctx[col]) {
  1672. c = 2;
  1673. } else if (s->above_comp_ctx[col]) {
  1674. c = 1 + 2 * (s->s.h.fixcompref == 1 ||
  1675. s->above_ref_ctx[col] == 1);
  1676. } else if (!s->above_ref_ctx[col]) {
  1677. c = 3;
  1678. } else {
  1679. c = 4 * (s->above_ref_ctx[col] == 1);
  1680. }
  1681. } else if (s->above_intra_ctx[col]) {
  1682. if (s->left_intra_ctx[row7]) {
  1683. c = 2;
  1684. } else if (s->left_comp_ctx[row7]) {
  1685. c = 1 + 2 * (s->s.h.fixcompref == 1 ||
  1686. s->left_ref_ctx[row7] == 1);
  1687. } else if (!s->left_ref_ctx[row7]) {
  1688. c = 3;
  1689. } else {
  1690. c = 4 * (s->left_ref_ctx[row7] == 1);
  1691. }
  1692. } else if (s->above_comp_ctx[col]) {
  1693. if (s->left_comp_ctx[row7]) {
  1694. if (s->left_ref_ctx[row7] == s->above_ref_ctx[col]) {
  1695. c = 3 * (s->s.h.fixcompref == 1 ||
  1696. s->left_ref_ctx[row7] == 1);
  1697. } else {
  1698. c = 2;
  1699. }
  1700. } else if (!s->left_ref_ctx[row7]) {
  1701. c = 1 + 2 * (s->s.h.fixcompref == 1 ||
  1702. s->above_ref_ctx[col] == 1);
  1703. } else {
  1704. c = 3 * (s->left_ref_ctx[row7] == 1) +
  1705. (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1);
  1706. }
  1707. } else if (s->left_comp_ctx[row7]) {
  1708. if (!s->above_ref_ctx[col]) {
  1709. c = 1 + 2 * (s->s.h.fixcompref == 1 ||
  1710. s->left_ref_ctx[row7] == 1);
  1711. } else {
  1712. c = 3 * (s->above_ref_ctx[col] == 1) +
  1713. (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1);
  1714. }
  1715. } else if (!s->above_ref_ctx[col]) {
  1716. if (!s->left_ref_ctx[row7]) {
  1717. c = 3;
  1718. } else {
  1719. c = 4 * (s->left_ref_ctx[row7] == 1);
  1720. }
  1721. } else if (!s->left_ref_ctx[row7]) {
  1722. c = 4 * (s->above_ref_ctx[col] == 1);
  1723. } else {
  1724. c = 2 * (s->left_ref_ctx[row7] == 1) +
  1725. 2 * (s->above_ref_ctx[col] == 1);
  1726. }
  1727. } else {
  1728. if (s->above_intra_ctx[col] ||
  1729. (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) {
  1730. c = 2;
  1731. } else if (s->above_comp_ctx[col]) {
  1732. c = 3 * (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1);
  1733. } else {
  1734. c = 4 * (s->above_ref_ctx[col] == 1);
  1735. }
  1736. }
  1737. } else if (have_l) {
  1738. if (s->left_intra_ctx[row7] ||
  1739. (!s->left_comp_ctx[row7] && !s->left_ref_ctx[row7])) {
  1740. c = 2;
  1741. } else if (s->left_comp_ctx[row7]) {
  1742. c = 3 * (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1);
  1743. } else {
  1744. c = 4 * (s->left_ref_ctx[row7] == 1);
  1745. }
  1746. } else {
  1747. c = 2;
  1748. }
  1749. bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][1]);
  1750. s->counts.single_ref[c][1][bit]++;
  1751. b->ref[0] = 1 + bit;
  1752. }
  1753. }
  1754. }
  1755. if (b->bs <= BS_8x8) {
  1756. if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].skip_enabled) {
  1757. b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV;
  1758. } else {
  1759. static const uint8_t off[10] = {
  1760. 3, 0, 0, 1, 0, 0, 0, 0, 0, 0
  1761. };
  1762. // FIXME this needs to use the LUT tables from find_ref_mvs
  1763. // because not all are -1,0/0,-1
  1764. int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]]
  1765. [s->left_mode_ctx[row7 + off[b->bs]]];
  1766. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1767. s->prob.p.mv_mode[c]);
  1768. b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
  1769. s->counts.mv_mode[c][b->mode[0] - 10]++;
  1770. }
  1771. }
  1772. if (s->s.h.filtermode == FILTER_SWITCHABLE) {
  1773. int c;
  1774. if (have_a && s->above_mode_ctx[col] >= NEARESTMV) {
  1775. if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
  1776. c = s->above_filter_ctx[col] == s->left_filter_ctx[row7] ?
  1777. s->left_filter_ctx[row7] : 3;
  1778. } else {
  1779. c = s->above_filter_ctx[col];
  1780. }
  1781. } else if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
  1782. c = s->left_filter_ctx[row7];
  1783. } else {
  1784. c = 3;
  1785. }
  1786. filter_id = vp8_rac_get_tree(&s->c, vp9_filter_tree,
  1787. s->prob.p.filter[c]);
  1788. s->counts.filter[c][filter_id]++;
  1789. b->filter = vp9_filter_lut[filter_id];
  1790. } else {
  1791. b->filter = s->s.h.filtermode;
  1792. }
  1793. if (b->bs > BS_8x8) {
  1794. int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][s->left_mode_ctx[row7]];
  1795. b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1796. s->prob.p.mv_mode[c]);
  1797. s->counts.mv_mode[c][b->mode[0] - 10]++;
  1798. fill_mv(s, b->mv[0], b->mode[0], 0);
  1799. if (b->bs != BS_8x4) {
  1800. b->mode[1] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1801. s->prob.p.mv_mode[c]);
  1802. s->counts.mv_mode[c][b->mode[1] - 10]++;
  1803. fill_mv(s, b->mv[1], b->mode[1], 1);
  1804. } else {
  1805. b->mode[1] = b->mode[0];
  1806. AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
  1807. AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
  1808. }
  1809. if (b->bs != BS_4x8) {
  1810. b->mode[2] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1811. s->prob.p.mv_mode[c]);
  1812. s->counts.mv_mode[c][b->mode[2] - 10]++;
  1813. fill_mv(s, b->mv[2], b->mode[2], 2);
  1814. if (b->bs != BS_8x4) {
  1815. b->mode[3] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
  1816. s->prob.p.mv_mode[c]);
  1817. s->counts.mv_mode[c][b->mode[3] - 10]++;
  1818. fill_mv(s, b->mv[3], b->mode[3], 3);
  1819. } else {
  1820. b->mode[3] = b->mode[2];
  1821. AV_COPY32(&b->mv[3][0], &b->mv[2][0]);
  1822. AV_COPY32(&b->mv[3][1], &b->mv[2][1]);
  1823. }
  1824. } else {
  1825. b->mode[2] = b->mode[0];
  1826. AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
  1827. AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
  1828. b->mode[3] = b->mode[1];
  1829. AV_COPY32(&b->mv[3][0], &b->mv[1][0]);
  1830. AV_COPY32(&b->mv[3][1], &b->mv[1][1]);
  1831. }
  1832. } else {
  1833. fill_mv(s, b->mv[0], b->mode[0], -1);
  1834. AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
  1835. AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
  1836. AV_COPY32(&b->mv[3][0], &b->mv[0][0]);
  1837. AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
  1838. AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
  1839. AV_COPY32(&b->mv[3][1], &b->mv[0][1]);
  1840. }
  1841. vref = b->ref[b->comp ? s->s.h.signbias[s->s.h.varcompref[0]] : 0];
  1842. }
  1843. #if HAVE_FAST_64BIT
  1844. #define SPLAT_CTX(var, val, n) \
  1845. switch (n) { \
  1846. case 1: var = val; break; \
  1847. case 2: AV_WN16A(&var, val * 0x0101); break; \
  1848. case 4: AV_WN32A(&var, val * 0x01010101); break; \
  1849. case 8: AV_WN64A(&var, val * 0x0101010101010101ULL); break; \
  1850. case 16: { \
  1851. uint64_t v64 = val * 0x0101010101010101ULL; \
  1852. AV_WN64A( &var, v64); \
  1853. AV_WN64A(&((uint8_t *) &var)[8], v64); \
  1854. break; \
  1855. } \
  1856. }
  1857. #else
  1858. #define SPLAT_CTX(var, val, n) \
  1859. switch (n) { \
  1860. case 1: var = val; break; \
  1861. case 2: AV_WN16A(&var, val * 0x0101); break; \
  1862. case 4: AV_WN32A(&var, val * 0x01010101); break; \
  1863. case 8: { \
  1864. uint32_t v32 = val * 0x01010101; \
  1865. AV_WN32A( &var, v32); \
  1866. AV_WN32A(&((uint8_t *) &var)[4], v32); \
  1867. break; \
  1868. } \
  1869. case 16: { \
  1870. uint32_t v32 = val * 0x01010101; \
  1871. AV_WN32A( &var, v32); \
  1872. AV_WN32A(&((uint8_t *) &var)[4], v32); \
  1873. AV_WN32A(&((uint8_t *) &var)[8], v32); \
  1874. AV_WN32A(&((uint8_t *) &var)[12], v32); \
  1875. break; \
  1876. } \
  1877. }
  1878. #endif
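// SPLAT_CTX broadcasts one byte into n consecutive context entries:
// multiplying an 8-bit value by 0x0101... replicates it into every
// byte lane (e.g. 3 * 0x01010101 = 0x03030303), so a single aligned
// store replaces a memset() call for these small fixed sizes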
  1879. switch (bwh_tab[1][b->bs][0]) {
  1880. #define SET_CTXS(dir, off, n) \
  1881. do { \
  1882. SPLAT_CTX(s->dir##_skip_ctx[off], b->skip, n); \
  1883. SPLAT_CTX(s->dir##_txfm_ctx[off], b->tx, n); \
  1884. SPLAT_CTX(s->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
  1885. if (!s->s.h.keyframe && !s->s.h.intraonly) { \
  1886. SPLAT_CTX(s->dir##_intra_ctx[off], b->intra, n); \
  1887. SPLAT_CTX(s->dir##_comp_ctx[off], b->comp, n); \
  1888. SPLAT_CTX(s->dir##_mode_ctx[off], b->mode[3], n); \
  1889. if (!b->intra) { \
  1890. SPLAT_CTX(s->dir##_ref_ctx[off], vref, n); \
  1891. if (s->s.h.filtermode == FILTER_SWITCHABLE) { \
  1892. SPLAT_CTX(s->dir##_filter_ctx[off], filter_id, n); \
  1893. } \
  1894. } \
  1895. } \
  1896. } while (0)
  1897. case 1: SET_CTXS(above, col, 1); break;
  1898. case 2: SET_CTXS(above, col, 2); break;
  1899. case 4: SET_CTXS(above, col, 4); break;
  1900. case 8: SET_CTXS(above, col, 8); break;
  1901. }
  1902. switch (bwh_tab[1][b->bs][1]) {
  1903. case 1: SET_CTXS(left, row7, 1); break;
  1904. case 2: SET_CTXS(left, row7, 2); break;
  1905. case 4: SET_CTXS(left, row7, 4); break;
  1906. case 8: SET_CTXS(left, row7, 8); break;
  1907. }
  1908. #undef SPLAT_CTX
  1909. #undef SET_CTXS
  1910. if (!s->s.h.keyframe && !s->s.h.intraonly) {
  1911. if (b->bs > BS_8x8) {
  1912. int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
  1913. AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][0], &b->mv[1][0]);
  1914. AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][1], &b->mv[1][1]);
  1915. AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][0], mv0);
  1916. AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][1], mv1);
  1917. AV_COPY32(&s->above_mv_ctx[col * 2 + 0][0], &b->mv[2][0]);
  1918. AV_COPY32(&s->above_mv_ctx[col * 2 + 0][1], &b->mv[2][1]);
  1919. AV_WN32A(&s->above_mv_ctx[col * 2 + 1][0], mv0);
  1920. AV_WN32A(&s->above_mv_ctx[col * 2 + 1][1], mv1);
  1921. } else {
  1922. int n, mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
  1923. for (n = 0; n < w4 * 2; n++) {
  1924. AV_WN32A(&s->above_mv_ctx[col * 2 + n][0], mv0);
  1925. AV_WN32A(&s->above_mv_ctx[col * 2 + n][1], mv1);
  1926. }
  1927. for (n = 0; n < h4 * 2; n++) {
  1928. AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][0], mv0);
  1929. AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][1], mv1);
  1930. }
  1931. }
  1932. }
  1933. // FIXME kinda ugly
  1934. for (y = 0; y < h4; y++) {
  1935. int x, o = (row + y) * s->sb_cols * 8 + col;
  1936. struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[o];
  1937. if (b->intra) {
  1938. for (x = 0; x < w4; x++) {
  1939. mv[x].ref[0] =
  1940. mv[x].ref[1] = -1;
  1941. }
  1942. } else if (b->comp) {
  1943. for (x = 0; x < w4; x++) {
  1944. mv[x].ref[0] = b->ref[0];
  1945. mv[x].ref[1] = b->ref[1];
  1946. AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
  1947. AV_COPY32(&mv[x].mv[1], &b->mv[3][1]);
  1948. }
  1949. } else {
  1950. for (x = 0; x < w4; x++) {
  1951. mv[x].ref[0] = b->ref[0];
  1952. mv[x].ref[1] = -1;
  1953. AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
  1954. }
  1955. }
  1956. }
  1957. }
  1958. // FIXME merge cnt/eob arguments?
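// Coefficient token decoding, in outline: for each scan position we
// code an EOB flag, a zero flag and a one flag; larger magnitudes fall
// through to category tokens (2..4, cat1/2, cat3-6) whose extra bits
// use fixed probabilities. The tail probabilities tp[3..10] are derived
// lazily from the single coded probability tp[2] via the
// vp9_model_pareto8 table. The context (nnz) for each position is the
// rounded average of the magnitude classes of the two already-decoded
// neighbours given by nb[], tracked in cache[].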
  1959. static av_always_inline int
  1960. decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
  1961. int is_tx32x32, int is8bitsperpixel, int bpp, unsigned (*cnt)[6][3],
  1962. unsigned (*eob)[6][2], uint8_t (*p)[6][11],
  1963. int nnz, const int16_t *scan, const int16_t (*nb)[2],
  1964. const int16_t *band_counts, const int16_t *qmul)
  1965. {
  1966. int i = 0, band = 0, band_left = band_counts[band];
  1967. uint8_t *tp = p[0][nnz];
  1968. uint8_t cache[1024];
  1969. do {
  1970. int val, rc;
  1971. val = vp56_rac_get_prob_branchy(c, tp[0]); // eob
  1972. eob[band][nnz][val]++;
  1973. if (!val)
  1974. break;
  1975. skip_eob:
  1976. if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
  1977. cnt[band][nnz][0]++;
  1978. if (!--band_left)
  1979. band_left = band_counts[++band];
  1980. cache[scan[i]] = 0;
  1981. nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
  1982. tp = p[band][nnz];
  1983. if (++i == n_coeffs)
1984. break; // invalid input; blocks should end with EOB
  1985. goto skip_eob;
  1986. }
  1987. rc = scan[i];
  1988. if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one
  1989. cnt[band][nnz][1]++;
  1990. val = 1;
  1991. cache[rc] = 1;
  1992. } else {
1993. // fill in tp[3-10] (model fill) - happens only once per frame for each position
  1994. if (!tp[3])
  1995. memcpy(&tp[3], vp9_model_pareto8[tp[2]], 8);
  1996. cnt[band][nnz][2]++;
  1997. if (!vp56_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
  1998. if (!vp56_rac_get_prob_branchy(c, tp[4])) {
  1999. cache[rc] = val = 2;
  2000. } else {
  2001. val = 3 + vp56_rac_get_prob(c, tp[5]);
  2002. cache[rc] = 3;
  2003. }
  2004. } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
  2005. cache[rc] = 4;
  2006. if (!vp56_rac_get_prob_branchy(c, tp[7])) {
  2007. val = 5 + vp56_rac_get_prob(c, 159);
  2008. } else {
  2009. val = 7 + (vp56_rac_get_prob(c, 165) << 1);
  2010. val += vp56_rac_get_prob(c, 145);
  2011. }
  2012. } else { // cat 3-6
  2013. cache[rc] = 5;
  2014. if (!vp56_rac_get_prob_branchy(c, tp[8])) {
  2015. if (!vp56_rac_get_prob_branchy(c, tp[9])) {
  2016. val = 11 + (vp56_rac_get_prob(c, 173) << 2);
  2017. val += (vp56_rac_get_prob(c, 148) << 1);
  2018. val += vp56_rac_get_prob(c, 140);
  2019. } else {
  2020. val = 19 + (vp56_rac_get_prob(c, 176) << 3);
  2021. val += (vp56_rac_get_prob(c, 155) << 2);
  2022. val += (vp56_rac_get_prob(c, 140) << 1);
  2023. val += vp56_rac_get_prob(c, 135);
  2024. }
  2025. } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
  2026. val = 35 + (vp56_rac_get_prob(c, 180) << 4);
  2027. val += (vp56_rac_get_prob(c, 157) << 3);
  2028. val += (vp56_rac_get_prob(c, 141) << 2);
  2029. val += (vp56_rac_get_prob(c, 134) << 1);
  2030. val += vp56_rac_get_prob(c, 130);
  2031. } else {
  2032. val = 67;
  2033. if (!is8bitsperpixel) {
  2034. if (bpp == 12) {
  2035. val += vp56_rac_get_prob(c, 255) << 17;
  2036. val += vp56_rac_get_prob(c, 255) << 16;
  2037. }
  2038. val += (vp56_rac_get_prob(c, 255) << 15);
  2039. val += (vp56_rac_get_prob(c, 255) << 14);
  2040. }
  2041. val += (vp56_rac_get_prob(c, 254) << 13);
  2042. val += (vp56_rac_get_prob(c, 254) << 12);
  2043. val += (vp56_rac_get_prob(c, 254) << 11);
  2044. val += (vp56_rac_get_prob(c, 252) << 10);
  2045. val += (vp56_rac_get_prob(c, 249) << 9);
  2046. val += (vp56_rac_get_prob(c, 243) << 8);
  2047. val += (vp56_rac_get_prob(c, 230) << 7);
  2048. val += (vp56_rac_get_prob(c, 196) << 6);
  2049. val += (vp56_rac_get_prob(c, 177) << 5);
  2050. val += (vp56_rac_get_prob(c, 153) << 4);
  2051. val += (vp56_rac_get_prob(c, 140) << 3);
  2052. val += (vp56_rac_get_prob(c, 133) << 2);
  2053. val += (vp56_rac_get_prob(c, 130) << 1);
  2054. val += vp56_rac_get_prob(c, 129);
  2055. }
  2056. }
  2057. }
  2058. #define STORE_COEF(c, i, v) do { \
  2059. if (is8bitsperpixel) { \
  2060. c[i] = v; \
  2061. } else { \
  2062. AV_WN32A(&c[i * 2], v); \
  2063. } \
  2064. } while (0)
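// at 10/12 bpp a dequantized coefficient can exceed the int16_t range,
// so the coefficient buffers then hold 32-bit values and STORE_COEF
// writes 4 bytes at element 2*i of the nominally int16_t array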
  2065. if (!--band_left)
  2066. band_left = band_counts[++band];
  2067. if (is_tx32x32)
  2068. STORE_COEF(coef, rc, ((vp8_rac_get(c) ? -val : val) * qmul[!!i]) / 2);
  2069. else
  2070. STORE_COEF(coef, rc, (vp8_rac_get(c) ? -val : val) * qmul[!!i]);
  2071. nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
  2072. tp = p[band][nnz];
  2073. } while (++i < n_coeffs);
  2074. return i;
  2075. }
  2076. static int decode_coeffs_b_8bpp(VP9Context *s, int16_t *coef, int n_coeffs,
  2077. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  2078. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  2079. const int16_t (*nb)[2], const int16_t *band_counts,
  2080. const int16_t *qmul)
  2081. {
  2082. return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 0, 1, 8, cnt, eob, p,
  2083. nnz, scan, nb, band_counts, qmul);
  2084. }
  2085. static int decode_coeffs_b32_8bpp(VP9Context *s, int16_t *coef, int n_coeffs,
  2086. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  2087. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  2088. const int16_t (*nb)[2], const int16_t *band_counts,
  2089. const int16_t *qmul)
  2090. {
  2091. return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 1, 1, 8, cnt, eob, p,
  2092. nnz, scan, nb, band_counts, qmul);
  2093. }
  2094. static int decode_coeffs_b_16bpp(VP9Context *s, int16_t *coef, int n_coeffs,
  2095. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  2096. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  2097. const int16_t (*nb)[2], const int16_t *band_counts,
  2098. const int16_t *qmul)
  2099. {
  2100. return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 0, 0, s->bpp, cnt, eob, p,
  2101. nnz, scan, nb, band_counts, qmul);
  2102. }
  2103. static int decode_coeffs_b32_16bpp(VP9Context *s, int16_t *coef, int n_coeffs,
  2104. unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
  2105. uint8_t (*p)[6][11], int nnz, const int16_t *scan,
  2106. const int16_t (*nb)[2], const int16_t *band_counts,
  2107. const int16_t *qmul)
  2108. {
  2109. return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 1, 0, s->bpp, cnt, eob, p,
  2110. nnz, scan, nb, band_counts, qmul);
  2111. }
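// the four thin wrappers above specialize the av_always_inline generic
// decoder with compile-time constants for is_tx32x32 and
// is8bitsperpixel, letting the compiler drop those branches from the
// hot token loop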
  2112. static av_always_inline int decode_coeffs(AVCodecContext *ctx, int is8bitsperpixel)
  2113. {
  2114. VP9Context *s = ctx->priv_data;
  2115. VP9Block *b = s->b;
  2116. int row = s->row, col = s->col;
  2117. uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
  2118. unsigned (*c)[6][3] = s->counts.coef[b->tx][0 /* y */][!b->intra];
  2119. unsigned (*e)[6][2] = s->counts.eob[b->tx][0 /* y */][!b->intra];
  2120. int w4 = bwh_tab[1][b->bs][0] << 1, h4 = bwh_tab[1][b->bs][1] << 1;
  2121. int end_x = FFMIN(2 * (s->cols - col), w4);
  2122. int end_y = FFMIN(2 * (s->rows - row), h4);
  2123. int n, pl, x, y, res;
  2124. int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul;
  2125. int tx = 4 * s->s.h.lossless + b->tx;
  2126. const int16_t * const *yscans = vp9_scans[tx];
  2127. const int16_t (* const *ynbs)[2] = vp9_scans_nb[tx];
  2128. const int16_t *uvscan = vp9_scans[b->uvtx][DCT_DCT];
  2129. const int16_t (*uvnb)[2] = vp9_scans_nb[b->uvtx][DCT_DCT];
  2130. uint8_t *a = &s->above_y_nnz_ctx[col * 2];
  2131. uint8_t *l = &s->left_y_nnz_ctx[(row & 7) << 1];
  2132. static const int16_t band_counts[4][8] = {
  2133. { 1, 2, 3, 4, 3, 16 - 13 },
  2134. { 1, 2, 3, 4, 11, 64 - 21 },
  2135. { 1, 2, 3, 4, 11, 256 - 21 },
  2136. { 1, 2, 3, 4, 11, 1024 - 21 },
  2137. };
  2138. const int16_t *y_band_counts = band_counts[b->tx];
  2139. const int16_t *uv_band_counts = band_counts[b->uvtx];
  2140. int bytesperpixel = is8bitsperpixel ? 1 : 2;
  2141. int total_coeff = 0;
  2142. #define MERGE(la, end, step, rd) \
  2143. for (n = 0; n < end; n += step) \
  2144. la[n] = !!rd(&la[n])
  2145. #define MERGE_CTX(step, rd) \
  2146. do { \
  2147. MERGE(l, end_y, step, rd); \
  2148. MERGE(a, end_x, step, rd); \
  2149. } while (0)
  2150. #define DECODE_Y_COEF_LOOP(step, mode_index, v) \
  2151. for (n = 0, y = 0; y < end_y; y += step) { \
  2152. for (x = 0; x < end_x; x += step, n += step * step) { \
  2153. enum TxfmType txtp = vp9_intra_txfm_type[b->mode[mode_index]]; \
  2154. res = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
  2155. (s, s->block + 16 * n * bytesperpixel, 16 * step * step, \
  2156. c, e, p, a[x] + l[y], yscans[txtp], \
  2157. ynbs[txtp], y_band_counts, qmul[0]); \
  2158. a[x] = l[y] = !!res; \
  2159. total_coeff |= !!res; \
  2160. if (step >= 4) { \
  2161. AV_WN16A(&s->eob[n], res); \
  2162. } else { \
  2163. s->eob[n] = res; \
  2164. } \
  2165. } \
  2166. }
  2167. #define SPLAT(la, end, step, cond) \
  2168. if (step == 2) { \
  2169. for (n = 1; n < end; n += step) \
  2170. la[n] = la[n - 1]; \
  2171. } else if (step == 4) { \
  2172. if (cond) { \
  2173. for (n = 0; n < end; n += step) \
  2174. AV_WN32A(&la[n], la[n] * 0x01010101); \
  2175. } else { \
  2176. for (n = 0; n < end; n += step) \
  2177. memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
  2178. } \
  2179. } else /* step == 8 */ { \
  2180. if (cond) { \
  2181. if (HAVE_FAST_64BIT) { \
  2182. for (n = 0; n < end; n += step) \
  2183. AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
  2184. } else { \
  2185. for (n = 0; n < end; n += step) { \
  2186. uint32_t v32 = la[n] * 0x01010101; \
  2187. AV_WN32A(&la[n], v32); \
  2188. AV_WN32A(&la[n + 4], v32); \
  2189. } \
  2190. } \
  2191. } else { \
  2192. for (n = 0; n < end; n += step) \
  2193. memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
  2194. } \
  2195. }
  2196. #define SPLAT_CTX(step) \
  2197. do { \
  2198. SPLAT(a, end_x, step, end_x == w4); \
  2199. SPLAT(l, end_y, step, end_y == h4); \
  2200. } while (0)
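// the above/left nnz context arrays hold one entry per 4x4 unit; for
// tx >= 8x8, MERGE_CTX first collapses each transform block's entries
// into a single nonzero flag for context derivation, and SPLAT_CTX
// copies the decoded flag back to every 4x4 unit the block covers.
// "cond" distinguishes fully visible blocks (fast aligned multi-byte
// stores) from blocks clipped by the frame edge, which use bounded
// memsets so nothing is written past the visible area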
  2201. /* y tokens */
  2202. switch (b->tx) {
  2203. case TX_4X4:
  2204. DECODE_Y_COEF_LOOP(1, b->bs > BS_8x8 ? n : 0,);
  2205. break;
  2206. case TX_8X8:
  2207. MERGE_CTX(2, AV_RN16A);
  2208. DECODE_Y_COEF_LOOP(2, 0,);
  2209. SPLAT_CTX(2);
  2210. break;
  2211. case TX_16X16:
  2212. MERGE_CTX(4, AV_RN32A);
  2213. DECODE_Y_COEF_LOOP(4, 0,);
  2214. SPLAT_CTX(4);
  2215. break;
  2216. case TX_32X32:
  2217. MERGE_CTX(8, AV_RN64A);
  2218. DECODE_Y_COEF_LOOP(8, 0, 32);
  2219. SPLAT_CTX(8);
  2220. break;
  2221. }
  2222. #define DECODE_UV_COEF_LOOP(step, v) \
  2223. for (n = 0, y = 0; y < end_y; y += step) { \
  2224. for (x = 0; x < end_x; x += step, n += step * step) { \
  2225. res = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
  2226. (s, s->uvblock[pl] + 16 * n * bytesperpixel, \
  2227. 16 * step * step, c, e, p, a[x] + l[y], \
  2228. uvscan, uvnb, uv_band_counts, qmul[1]); \
  2229. a[x] = l[y] = !!res; \
  2230. total_coeff |= !!res; \
  2231. if (step >= 4) { \
  2232. AV_WN16A(&s->uveob[pl][n], res); \
  2233. } else { \
  2234. s->uveob[pl][n] = res; \
  2235. } \
  2236. } \
  2237. }
  2238. p = s->prob.coef[b->uvtx][1 /* uv */][!b->intra];
  2239. c = s->counts.coef[b->uvtx][1 /* uv */][!b->intra];
  2240. e = s->counts.eob[b->uvtx][1 /* uv */][!b->intra];
  2241. w4 >>= s->ss_h;
  2242. end_x >>= s->ss_h;
  2243. h4 >>= s->ss_v;
  2244. end_y >>= s->ss_v;
  2245. for (pl = 0; pl < 2; pl++) {
  2246. a = &s->above_uv_nnz_ctx[pl][col << !s->ss_h];
  2247. l = &s->left_uv_nnz_ctx[pl][(row & 7) << !s->ss_v];
  2248. switch (b->uvtx) {
  2249. case TX_4X4:
  2250. DECODE_UV_COEF_LOOP(1,);
  2251. break;
  2252. case TX_8X8:
  2253. MERGE_CTX(2, AV_RN16A);
  2254. DECODE_UV_COEF_LOOP(2,);
  2255. SPLAT_CTX(2);
  2256. break;
  2257. case TX_16X16:
  2258. MERGE_CTX(4, AV_RN32A);
  2259. DECODE_UV_COEF_LOOP(4,);
  2260. SPLAT_CTX(4);
  2261. break;
  2262. case TX_32X32:
  2263. MERGE_CTX(8, AV_RN64A);
  2264. DECODE_UV_COEF_LOOP(8, 32);
  2265. SPLAT_CTX(8);
  2266. break;
  2267. }
  2268. }
  2269. return total_coeff;
  2270. }
  2271. static int decode_coeffs_8bpp(AVCodecContext *ctx)
  2272. {
  2273. return decode_coeffs(ctx, 1);
  2274. }
  2275. static int decode_coeffs_16bpp(AVCodecContext *ctx)
  2276. {
  2277. return decode_coeffs(ctx, 0);
  2278. }
  2279. static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **a,
  2280. uint8_t *dst_edge, ptrdiff_t stride_edge,
  2281. uint8_t *dst_inner, ptrdiff_t stride_inner,
  2282. uint8_t *l, int col, int x, int w,
  2283. int row, int y, enum TxfmMode tx,
  2284. int p, int ss_h, int ss_v, int bytesperpixel)
  2285. {
  2286. int have_top = row > 0 || y > 0;
  2287. int have_left = col > s->tile_col_start || x > 0;
  2288. int have_right = x < w - 1;
  2289. int bpp = s->bpp;
  2290. static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
  2291. [VERT_PRED] = { { DC_127_PRED, VERT_PRED },
  2292. { DC_127_PRED, VERT_PRED } },
  2293. [HOR_PRED] = { { DC_129_PRED, DC_129_PRED },
  2294. { HOR_PRED, HOR_PRED } },
  2295. [DC_PRED] = { { DC_128_PRED, TOP_DC_PRED },
  2296. { LEFT_DC_PRED, DC_PRED } },
  2297. [DIAG_DOWN_LEFT_PRED] = { { DC_127_PRED, DIAG_DOWN_LEFT_PRED },
  2298. { DC_127_PRED, DIAG_DOWN_LEFT_PRED } },
  2299. [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
  2300. { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
  2301. [VERT_RIGHT_PRED] = { { VERT_RIGHT_PRED, VERT_RIGHT_PRED },
  2302. { VERT_RIGHT_PRED, VERT_RIGHT_PRED } },
  2303. [HOR_DOWN_PRED] = { { HOR_DOWN_PRED, HOR_DOWN_PRED },
  2304. { HOR_DOWN_PRED, HOR_DOWN_PRED } },
  2305. [VERT_LEFT_PRED] = { { DC_127_PRED, VERT_LEFT_PRED },
  2306. { DC_127_PRED, VERT_LEFT_PRED } },
  2307. [HOR_UP_PRED] = { { DC_129_PRED, DC_129_PRED },
  2308. { HOR_UP_PRED, HOR_UP_PRED } },
  2309. [TM_VP8_PRED] = { { DC_129_PRED, VERT_PRED },
  2310. { HOR_PRED, TM_VP8_PRED } },
  2311. };
  2312. static const struct {
  2313. uint8_t needs_left:1;
  2314. uint8_t needs_top:1;
  2315. uint8_t needs_topleft:1;
  2316. uint8_t needs_topright:1;
  2317. uint8_t invert_left:1;
  2318. } edges[N_INTRA_PRED_MODES] = {
  2319. [VERT_PRED] = { .needs_top = 1 },
  2320. [HOR_PRED] = { .needs_left = 1 },
  2321. [DC_PRED] = { .needs_top = 1, .needs_left = 1 },
  2322. [DIAG_DOWN_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
  2323. [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2324. [VERT_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2325. [HOR_DOWN_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2326. [VERT_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
  2327. [HOR_UP_PRED] = { .needs_left = 1, .invert_left = 1 },
  2328. [TM_VP8_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
  2329. [LEFT_DC_PRED] = { .needs_left = 1 },
  2330. [TOP_DC_PRED] = { .needs_top = 1 },
  2331. [DC_128_PRED] = { 0 },
  2332. [DC_127_PRED] = { 0 },
  2333. [DC_129_PRED] = { 0 }
  2334. };
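// mode_conv[] remaps a prediction mode when the neighbours it needs are
// unavailable (e.g. VERT_PRED without a top edge becomes DC_127_PRED),
// and edges[] lists which edge buffers the final mode actually reads;
// the code below uses both to assemble the a[] (top) and l[] (left)
// edge arrays, synthesizing fixed values where real pixels are missing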
  2335. av_assert2(mode >= 0 && mode < 10);
  2336. mode = mode_conv[mode][have_left][have_top];
  2337. if (edges[mode].needs_top) {
  2338. uint8_t *top, *topleft;
  2339. int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !ss_h) - x) * 4;
  2340. int n_px_need_tr = 0;
  2341. if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
  2342. n_px_need_tr = 4;
  2343. // if top of sb64-row, use s->intra_pred_data[] instead of
  2344. // dst[-stride] for intra prediction (it contains pre- instead of
  2345. // post-loopfilter data)
  2346. if (have_top) {
  2347. top = !(row & 7) && !y ?
  2348. s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
  2349. y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
  2350. if (have_left)
  2351. topleft = !(row & 7) && !y ?
  2352. s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
  2353. y == 0 || x == 0 ? &dst_edge[-stride_edge] :
  2354. &dst_inner[-stride_inner];
  2355. }
  2356. if (have_top &&
  2357. (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
  2358. (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
  2359. n_px_need + n_px_need_tr <= n_px_have) {
  2360. *a = top;
  2361. } else {
  2362. if (have_top) {
  2363. if (n_px_need <= n_px_have) {
  2364. memcpy(*a, top, n_px_need * bytesperpixel);
  2365. } else {
  2366. #define memset_bpp(c, i1, v, i2, num) do { \
  2367. if (bytesperpixel == 1) { \
  2368. memset(&(c)[(i1)], (v)[(i2)], (num)); \
  2369. } else { \
  2370. int n, val = AV_RN16A(&(v)[(i2) * 2]); \
  2371. for (n = 0; n < (num); n++) { \
  2372. AV_WN16A(&(c)[((i1) + n) * 2], val); \
  2373. } \
  2374. } \
  2375. } while (0)
  2376. memcpy(*a, top, n_px_have * bytesperpixel);
  2377. memset_bpp(*a, n_px_have, (*a), n_px_have - 1, n_px_need - n_px_have);
  2378. }
  2379. } else {
  2380. #define memset_val(c, val, num) do { \
  2381. if (bytesperpixel == 1) { \
  2382. memset((c), (val), (num)); \
  2383. } else { \
  2384. int n; \
  2385. for (n = 0; n < (num); n++) { \
  2386. AV_WN16A(&(c)[n * 2], (val)); \
  2387. } \
  2388. } \
  2389. } while (0)
  2390. memset_val(*a, (128 << (bpp - 8)) - 1, n_px_need);
  2391. }
  2392. if (edges[mode].needs_topleft) {
  2393. if (have_left && have_top) {
  2394. #define assign_bpp(c, i1, v, i2) do { \
  2395. if (bytesperpixel == 1) { \
  2396. (c)[(i1)] = (v)[(i2)]; \
  2397. } else { \
  2398. AV_COPY16(&(c)[(i1) * 2], &(v)[(i2) * 2]); \
  2399. } \
  2400. } while (0)
  2401. assign_bpp(*a, -1, topleft, -1);
  2402. } else {
  2403. #define assign_val(c, i, v) do { \
  2404. if (bytesperpixel == 1) { \
  2405. (c)[(i)] = (v); \
  2406. } else { \
  2407. AV_WN16A(&(c)[(i) * 2], (v)); \
  2408. } \
  2409. } while (0)
  2410. assign_val((*a), -1, (128 << (bpp - 8)) + (have_top ? +1 : -1));
  2411. }
  2412. }
  2413. if (tx == TX_4X4 && edges[mode].needs_topright) {
  2414. if (have_top && have_right &&
  2415. n_px_need + n_px_need_tr <= n_px_have) {
  2416. memcpy(&(*a)[4 * bytesperpixel], &top[4 * bytesperpixel], 4 * bytesperpixel);
  2417. } else {
  2418. memset_bpp(*a, 4, *a, 3, 4);
  2419. }
  2420. }
  2421. }
  2422. }
  2423. if (edges[mode].needs_left) {
  2424. if (have_left) {
  2425. int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !ss_v) - y) * 4;
  2426. uint8_t *dst = x == 0 ? dst_edge : dst_inner;
  2427. ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;
  2428. if (edges[mode].invert_left) {
  2429. if (n_px_need <= n_px_have) {
  2430. for (i = 0; i < n_px_need; i++)
  2431. assign_bpp(l, i, &dst[i * stride], -1);
  2432. } else {
  2433. for (i = 0; i < n_px_have; i++)
  2434. assign_bpp(l, i, &dst[i * stride], -1);
  2435. memset_bpp(l, n_px_have, l, n_px_have - 1, n_px_need - n_px_have);
  2436. }
  2437. } else {
  2438. if (n_px_need <= n_px_have) {
  2439. for (i = 0; i < n_px_need; i++)
  2440. assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
  2441. } else {
  2442. for (i = 0; i < n_px_have; i++)
  2443. assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
  2444. memset_bpp(l, 0, l, n_px_need - n_px_have, n_px_need - n_px_have);
  2445. }
  2446. }
  2447. } else {
  2448. memset_val(l, (128 << (bpp - 8)) + 1, 4 << tx);
  2449. }
  2450. }
  2451. return mode;
  2452. }
  2453. static av_always_inline void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off,
  2454. ptrdiff_t uv_off, int bytesperpixel)
  2455. {
  2456. VP9Context *s = ctx->priv_data;
  2457. VP9Block *b = s->b;
  2458. int row = s->row, col = s->col;
  2459. int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
  2460. int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
  2461. int end_x = FFMIN(2 * (s->cols - col), w4);
  2462. int end_y = FFMIN(2 * (s->rows - row), h4);
  2463. int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
  2464. int uvstep1d = 1 << b->uvtx, p;
  2465. uint8_t *dst = s->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off;
  2466. LOCAL_ALIGNED_32(uint8_t, a_buf, [96]);
  2467. LOCAL_ALIGNED_32(uint8_t, l, [64]);
  2468. for (n = 0, y = 0; y < end_y; y += step1d) {
  2469. uint8_t *ptr = dst, *ptr_r = dst_r;
  2470. for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d * bytesperpixel,
  2471. ptr_r += 4 * step1d * bytesperpixel, n += step) {
  2472. int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
  2473. y * 2 + x : 0];
  2474. uint8_t *a = &a_buf[32];
  2475. enum TxfmType txtp = vp9_intra_txfm_type[mode];
  2476. int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
  2477. mode = check_intra_mode(s, mode, &a, ptr_r,
  2478. s->s.frames[CUR_FRAME].tf.f->linesize[0],
  2479. ptr, s->y_stride, l,
  2480. col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel);
  2481. s->dsp.intra_pred[b->tx][mode](ptr, s->y_stride, l, a);
  2482. if (eob)
  2483. s->dsp.itxfm_add[tx][txtp](ptr, s->y_stride,
  2484. s->block + 16 * n * bytesperpixel, eob);
  2485. }
  2486. dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0];
  2487. dst += 4 * step1d * s->y_stride;
  2488. }
  2489. // U/V
  2490. w4 >>= s->ss_h;
  2491. end_x >>= s->ss_h;
  2492. end_y >>= s->ss_v;
  2493. step = 1 << (b->uvtx * 2);
  2494. for (p = 0; p < 2; p++) {
  2495. dst = s->dst[1 + p];
  2496. dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
  2497. for (n = 0, y = 0; y < end_y; y += uvstep1d) {
  2498. uint8_t *ptr = dst, *ptr_r = dst_r;
  2499. for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel,
  2500. ptr_r += 4 * uvstep1d * bytesperpixel, n += step) {
  2501. int mode = b->uvmode;
  2502. uint8_t *a = &a_buf[32];
  2503. int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
  2504. mode = check_intra_mode(s, mode, &a, ptr_r,
  2505. s->s.frames[CUR_FRAME].tf.f->linesize[1],
  2506. ptr, s->uv_stride, l, col, x, w4, row, y,
  2507. b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel);
  2508. s->dsp.intra_pred[b->uvtx][mode](ptr, s->uv_stride, l, a);
  2509. if (eob)
  2510. s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
  2511. s->uvblock[p] + 16 * n * bytesperpixel, eob);
  2512. }
  2513. dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1];
  2514. dst += 4 * uvstep1d * s->uv_stride;
  2515. }
  2516. }
  2517. }
  2518. static void intra_recon_8bpp(AVCodecContext *ctx, ptrdiff_t y_off, ptrdiff_t uv_off)
  2519. {
  2520. intra_recon(ctx, y_off, uv_off, 1);
  2521. }
  2522. static void intra_recon_16bpp(AVCodecContext *ctx, ptrdiff_t y_off, ptrdiff_t uv_off)
  2523. {
  2524. intra_recon(ctx, y_off, uv_off, 2);
  2525. }
  2526. static av_always_inline void mc_luma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
  2527. uint8_t *dst, ptrdiff_t dst_stride,
  2528. const uint8_t *ref, ptrdiff_t ref_stride,
  2529. ThreadFrame *ref_frame,
  2530. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2531. int bw, int bh, int w, int h, int bytesperpixel)
  2532. {
  2533. int mx = mv->x, my = mv->y, th;
  2534. y += my >> 3;
  2535. x += mx >> 3;
  2536. ref += y * ref_stride + x * bytesperpixel;
  2537. mx &= 7;
  2538. my &= 7;
  2539. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2540. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2541. // the longest loopfilter of the next sbrow
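// (progress on the reference is reported once per 64-pixel sbrow,
// hence the >> 6 of the bottom-most pixel row needed)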
  2542. th = (y + bh + 4 * !!my + 7) >> 6;
  2543. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2544. if (x < !!mx * 3 || y < !!my * 3 ||
  2545. x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
  2546. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2547. ref - !!my * 3 * ref_stride - !!mx * 3 * bytesperpixel,
  2548. 160, ref_stride,
  2549. bw + !!mx * 7, bh + !!my * 7,
  2550. x - !!mx * 3, y - !!my * 3, w, h);
  2551. ref = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
  2552. ref_stride = 160;
  2553. }
  2554. mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
  2555. }
  2556. static av_always_inline void mc_chroma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
  2557. uint8_t *dst_u, uint8_t *dst_v,
  2558. ptrdiff_t dst_stride,
  2559. const uint8_t *ref_u, ptrdiff_t src_stride_u,
  2560. const uint8_t *ref_v, ptrdiff_t src_stride_v,
  2561. ThreadFrame *ref_frame,
  2562. ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
  2563. int bw, int bh, int w, int h, int bytesperpixel)
  2564. {
  2565. int mx = mv->x << !s->ss_h, my = mv->y << !s->ss_v, th;
  2566. y += my >> 4;
  2567. x += mx >> 4;
  2568. ref_u += y * src_stride_u + x * bytesperpixel;
  2569. ref_v += y * src_stride_v + x * bytesperpixel;
  2570. mx &= 15;
  2571. my &= 15;
  2572. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2573. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2574. // the longest loopfilter of the next sbrow
  2575. th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
  2576. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2577. if (x < !!mx * 3 || y < !!my * 3 ||
  2578. x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
  2579. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2580. ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel,
  2581. 160, src_stride_u,
  2582. bw + !!mx * 7, bh + !!my * 7,
  2583. x - !!mx * 3, y - !!my * 3, w, h);
  2584. ref_u = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
  2585. mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my);
  2586. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2587. ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel,
  2588. 160, src_stride_v,
  2589. bw + !!mx * 7, bh + !!my * 7,
  2590. x - !!mx * 3, y - !!my * 3, w, h);
  2591. ref_v = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
  2592. mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my);
  2593. } else {
  2594. mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
  2595. mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
  2596. }
  2597. }
  2598. #define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
  2599. px, py, pw, ph, bw, bh, w, h, i) \
  2600. mc_luma_unscaled(s, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
  2601. mv, bw, bh, w, h, bytesperpixel)
  2602. #define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2603. row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
  2604. mc_chroma_unscaled(s, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2605. row, col, mv, bw, bh, w, h, bytesperpixel)
  2606. #define SCALED 0
  2607. #define FN(x) x##_8bpp
  2608. #define BYTES_PER_PIXEL 1
  2609. #include "vp9_mc_template.c"
  2610. #undef FN
  2611. #undef BYTES_PER_PIXEL
  2612. #define FN(x) x##_16bpp
  2613. #define BYTES_PER_PIXEL 2
  2614. #include "vp9_mc_template.c"
  2615. #undef mc_luma_dir
  2616. #undef mc_chroma_dir
  2617. #undef FN
  2618. #undef BYTES_PER_PIXEL
  2619. #undef SCALED
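// vp9_mc_template.c is included twice here (and twice more below for
// the scaled case) to stamp out 8bpp and 16bpp variants of the inter
// prediction functions: FN() mangles the generated names, and the
// mc_luma_dir/mc_chroma_dir macros bind them to the unscaled or scaled
// block-copy helpers defined above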
  2620. static av_always_inline void mc_luma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
  2621. vp9_mc_func (*mc)[2],
  2622. uint8_t *dst, ptrdiff_t dst_stride,
  2623. const uint8_t *ref, ptrdiff_t ref_stride,
  2624. ThreadFrame *ref_frame,
  2625. ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
  2626. int px, int py, int pw, int ph,
  2627. int bw, int bh, int w, int h, int bytesperpixel,
  2628. const uint16_t *scale, const uint8_t *step)
  2629. {
  2630. if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
  2631. s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
  2632. mc_luma_unscaled(s, mc, dst, dst_stride, ref, ref_stride, ref_frame,
  2633. y, x, in_mv, bw, bh, w, h, bytesperpixel);
  2634. } else {
  2635. #define scale_mv(n, dim) (((int64_t)(n) * scale[dim]) >> 14)
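// scale[] holds 14-bit fixed-point ratios between reference and
// current frame dimensions ((ref_dim << 14) / cur_dim, as set up
// during frame header parsing elsewhere in this file), so scale_mv()
// maps current-frame positions and motion vectors onto the reference
// frame's sample grid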
  2636. int mx, my;
  2637. int refbw_m1, refbh_m1;
  2638. int th;
  2639. VP56mv mv;
  2640. mv.x = av_clip(in_mv->x, -(x + pw - px + 4) << 3, (s->cols * 8 - x + px + 3) << 3);
  2641. mv.y = av_clip(in_mv->y, -(y + ph - py + 4) << 3, (s->rows * 8 - y + py + 3) << 3);
  2642. // BUG libvpx seems to scale the two components separately. This introduces
  2643. // rounding errors but we have to reproduce them to be exactly compatible
  2644. // with the output from libvpx...
  2645. mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
  2646. my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
  2647. y = my >> 4;
  2648. x = mx >> 4;
  2649. ref += y * ref_stride + x * bytesperpixel;
  2650. mx &= 15;
  2651. my &= 15;
  2652. refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
  2653. refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
  2654. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2655. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2656. // the longest loopfilter of the next sbrow
  2657. th = (y + refbh_m1 + 4 + 7) >> 6;
  2658. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2659. if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 4 >= h - refbh_m1) {
  2660. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2661. ref - 3 * ref_stride - 3 * bytesperpixel,
  2662. 288, ref_stride,
  2663. refbw_m1 + 8, refbh_m1 + 8,
  2664. x - 3, y - 3, w, h);
  2665. ref = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
  2666. ref_stride = 288;
  2667. }
  2668. smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
  2669. }
  2670. }
  2671. static av_always_inline void mc_chroma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
  2672. vp9_mc_func (*mc)[2],
  2673. uint8_t *dst_u, uint8_t *dst_v,
  2674. ptrdiff_t dst_stride,
  2675. const uint8_t *ref_u, ptrdiff_t src_stride_u,
  2676. const uint8_t *ref_v, ptrdiff_t src_stride_v,
  2677. ThreadFrame *ref_frame,
  2678. ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
  2679. int px, int py, int pw, int ph,
  2680. int bw, int bh, int w, int h, int bytesperpixel,
  2681. const uint16_t *scale, const uint8_t *step)
  2682. {
  2683. if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
  2684. s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
  2685. mc_chroma_unscaled(s, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
  2686. ref_v, src_stride_v, ref_frame,
  2687. y, x, in_mv, bw, bh, w, h, bytesperpixel);
  2688. } else {
  2689. int mx, my;
  2690. int refbw_m1, refbh_m1;
  2691. int th;
  2692. VP56mv mv;
  2693. if (s->ss_h) {
  2694. // BUG https://code.google.com/p/webm/issues/detail?id=820
  2695. mv.x = av_clip(in_mv->x, -(x + pw - px + 4) << 4, (s->cols * 4 - x + px + 3) << 4);
  2696. mx = scale_mv(mv.x, 0) + (scale_mv(x * 16, 0) & ~15) + (scale_mv(x * 32, 0) & 15);
  2697. } else {
  2698. mv.x = av_clip(in_mv->x, -(x + pw - px + 4) << 3, (s->cols * 8 - x + px + 3) << 3);
  2699. mx = scale_mv(mv.x << 1, 0) + scale_mv(x * 16, 0);
  2700. }
  2701. if (s->ss_v) {
  2702. // BUG https://code.google.com/p/webm/issues/detail?id=820
  2703. mv.y = av_clip(in_mv->y, -(y + ph - py + 4) << 4, (s->rows * 4 - y + py + 3) << 4);
  2704. my = scale_mv(mv.y, 1) + (scale_mv(y * 16, 1) & ~15) + (scale_mv(y * 32, 1) & 15);
  2705. } else {
  2706. mv.y = av_clip(in_mv->y, -(y + ph - py + 4) << 3, (s->rows * 8 - y + py + 3) << 3);
  2707. my = scale_mv(mv.y << 1, 1) + scale_mv(y * 16, 1);
  2708. }
  2709. #undef scale_mv
  2710. y = my >> 4;
  2711. x = mx >> 4;
  2712. ref_u += y * src_stride_u + x * bytesperpixel;
  2713. ref_v += y * src_stride_v + x * bytesperpixel;
  2714. mx &= 15;
  2715. my &= 15;
  2716. refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
  2717. refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
  2718. // FIXME bilinear filter only needs 0/1 pixels, not 3/4
  2719. // we use +7 because the last 7 pixels of each sbrow can be changed in
  2720. // the longest loopfilter of the next sbrow
  2721. th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
  2722. ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
  2723. if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 4 >= h - refbh_m1) {
  2724. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2725. ref_u - 3 * src_stride_u - 3 * bytesperpixel,
  2726. 288, src_stride_u,
  2727. refbw_m1 + 8, refbh_m1 + 8,
  2728. x - 3, y - 3, w, h);
  2729. ref_u = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
  2730. smc(dst_u, dst_stride, ref_u, 288, bh, mx, my, step[0], step[1]);
  2731. s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
  2732. ref_v - 3 * src_stride_v - 3 * bytesperpixel,
  2733. 288, src_stride_v,
  2734. refbw_m1 + 8, refbh_m1 + 8,
  2735. x - 3, y - 3, w, h);
  2736. ref_v = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
  2737. smc(dst_v, dst_stride, ref_v, 288, bh, mx, my, step[0], step[1]);
  2738. } else {
  2739. smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
  2740. smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
  2741. }
  2742. }
  2743. }
  2744. #define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
  2745. px, py, pw, ph, bw, bh, w, h, i) \
  2746. mc_luma_scaled(s, s->dsp.s##mc, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
  2747. mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
  2748. s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
  2749. #define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2750. row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
  2751. mc_chroma_scaled(s, s->dsp.s##mc, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
  2752. row, col, mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
  2753. s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
  2754. #define SCALED 1
  2755. #define FN(x) x##_scaled_8bpp
  2756. #define BYTES_PER_PIXEL 1
  2757. #include "vp9_mc_template.c"
  2758. #undef FN
  2759. #undef BYTES_PER_PIXEL
  2760. #define FN(x) x##_scaled_16bpp
  2761. #define BYTES_PER_PIXEL 2
  2762. #include "vp9_mc_template.c"
  2763. #undef mc_luma_dir
  2764. #undef mc_chroma_dir
  2765. #undef FN
  2766. #undef BYTES_PER_PIXEL
  2767. #undef SCALED
  2768. static av_always_inline void inter_recon(AVCodecContext *ctx, int bytesperpixel)
  2769. {
  2770. VP9Context *s = ctx->priv_data;
  2771. VP9Block *b = s->b;
  2772. int row = s->row, col = s->col;
  2773. if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
  2774. if (bytesperpixel == 1) {
  2775. inter_pred_scaled_8bpp(ctx);
  2776. } else {
  2777. inter_pred_scaled_16bpp(ctx);
  2778. }
  2779. } else {
  2780. if (bytesperpixel == 1) {
  2781. inter_pred_8bpp(ctx);
  2782. } else {
  2783. inter_pred_16bpp(ctx);
  2784. }
  2785. }
  2786. if (!b->skip) {
2787. /* mostly copied from intra_recon() */
  2788. int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
  2789. int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
  2790. int end_x = FFMIN(2 * (s->cols - col), w4);
  2791. int end_y = FFMIN(2 * (s->rows - row), h4);
  2792. int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
  2793. int uvstep1d = 1 << b->uvtx, p;
  2794. uint8_t *dst = s->dst[0];
  2795. // y itxfm add
  2796. for (n = 0, y = 0; y < end_y; y += step1d) {
  2797. uint8_t *ptr = dst;
  2798. for (x = 0; x < end_x; x += step1d,
  2799. ptr += 4 * step1d * bytesperpixel, n += step) {
  2800. int eob = b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
  2801. if (eob)
  2802. s->dsp.itxfm_add[tx][DCT_DCT](ptr, s->y_stride,
  2803. s->block + 16 * n * bytesperpixel, eob);
  2804. }
  2805. dst += 4 * s->y_stride * step1d;
  2806. }
  2807. // uv itxfm add
  2808. end_x >>= s->ss_h;
  2809. end_y >>= s->ss_v;
  2810. step = 1 << (b->uvtx * 2);
  2811. for (p = 0; p < 2; p++) {
  2812. dst = s->dst[p + 1];
  2813. for (n = 0, y = 0; y < end_y; y += uvstep1d) {
  2814. uint8_t *ptr = dst;
  2815. for (x = 0; x < end_x; x += uvstep1d,
  2816. ptr += 4 * uvstep1d * bytesperpixel, n += step) {
  2817. int eob = b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
  2818. if (eob)
  2819. s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
  2820. s->uvblock[p] + 16 * n * bytesperpixel, eob);
  2821. }
  2822. dst += 4 * uvstep1d * s->uv_stride;
  2823. }
  2824. }
  2825. }
  2826. }
  2827. static void inter_recon_8bpp(AVCodecContext *ctx)
  2828. {
  2829. inter_recon(ctx, 1);
  2830. }
  2831. static void inter_recon_16bpp(AVCodecContext *ctx)
  2832. {
  2833. inter_recon(ctx, 2);
  2834. }

static av_always_inline void mask_edges(uint8_t (*mask)[8][4], int ss_h, int ss_v,
                                        int row_and_7, int col_and_7,
                                        int w, int h, int col_end, int row_end,
                                        enum TxfmMode tx, int skip_inter)
{
    static const unsigned wide_filter_col_mask[2] = { 0x11, 0x01 };
    static const unsigned wide_filter_row_mask[2] = { 0x03, 0x07 };

    // FIXME I'm pretty sure all loops can be replaced by a single LUT if
    // we make VP9Filter.mask uint64_t (i.e. row/col all single variable)
    // and make the LUT 5-indexed (bl, bp, is_uv, tx and row/col), and then
    // use row_and_7/col_and_7 as shifts (1*col_and_7+8*row_and_7)

    // the intended behaviour of the vp9 loopfilter is to work on 8-pixel
    // edges. This means that for UV, we work on two subsampled blocks at
    // a time, and we only use the topleft block's mode information to set
    // things like block strength. Thus, for any block size smaller than
    // 16x16, ignore the odd portion of the block.
    if (tx == TX_4X4 && (ss_v | ss_h)) {
        if (h == ss_v) {
            if (row_and_7 & 1)
                return;
            if (!row_end)
                h += 1;
        }
        if (w == ss_h) {
            if (col_and_7 & 1)
                return;
            if (!col_end)
                w += 1;
        }
    }
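
    /*
     * The column masks below are bitmasks over the eight 4px-wide
     * sub-columns of a 64x64 superblock. Worked example (illustrative
     * only): col_and_7 == 2 and w == 3 give t = 1 << 2 = 0x04 and
     * m_col = (t << w) - t = 0x20 - 0x04 = 0x1c, i.e. bits 2..4 set,
     * covering exactly the three 4px columns the block occupies.
     */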
    if (tx == TX_4X4 && !skip_inter) {
        int t = 1 << col_and_7, m_col = (t << w) - t, y;
        // on 32-px edges, use the 8-px wide loopfilter; else, use 4-px wide
        int m_row_8 = m_col & wide_filter_col_mask[ss_h], m_row_4 = m_col - m_row_8;

        for (y = row_and_7; y < h + row_and_7; y++) {
            int col_mask_id = 2 - !(y & wide_filter_row_mask[ss_v]);

            mask[0][y][1] |= m_row_8;
            mask[0][y][2] |= m_row_4;
            // for odd lines, if the odd col is not being filtered,
            // skip odd row also:
            // .---. <-- a
            // |   |
            // |___| <-- b
            // ^   ^
            // c   d
            //
            // if a/c are even row/col and b/d are odd, and d is skipped,
            // e.g. right edge of size-66x66.webm, then skip b also (bug)
            if ((ss_h & ss_v) && (col_end & 1) && (y & 1)) {
                mask[1][y][col_mask_id] |= (t << (w - 1)) - t;
            } else {
                mask[1][y][col_mask_id] |= m_col;
            }
            if (!ss_h)
                mask[0][y][3] |= m_col;
            if (!ss_v) {
                if (ss_h && (col_end & 1))
                    mask[1][y][3] |= (t << (w - 1)) - t;
                else
                    mask[1][y][3] |= m_col;
            }
        }
    } else {
        int y, t = 1 << col_and_7, m_col = (t << w) - t;

        if (!skip_inter) {
            int mask_id = (tx == TX_8X8);
            static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
            int l2 = tx + ss_h - 1, step1d;
            int m_row = m_col & masks[l2];

            // at odd UV col/row tx16/tx32 loopfilter edges, force the
            // 8px-wide loopfilter to prevent going off the visible edge.
            if (ss_h && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
                int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
                int m_row_8 = m_row - m_row_16;

                for (y = row_and_7; y < h + row_and_7; y++) {
                    mask[0][y][0] |= m_row_16;
                    mask[0][y][1] |= m_row_8;
                }
            } else {
                for (y = row_and_7; y < h + row_and_7; y++)
                    mask[0][y][mask_id] |= m_row;
            }

            l2 = tx + ss_v - 1;
            step1d = 1 << l2;
            if (ss_v && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
                for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
                    mask[1][y][0] |= m_col;
                if (y - row_and_7 == h - 1)
                    mask[1][y][1] |= m_col;
            } else {
                for (y = row_and_7; y < h + row_and_7; y += step1d)
                    mask[1][y][mask_id] |= m_col;
            }
        } else if (tx != TX_4X4) {
            int mask_id;

            mask_id = (tx == TX_8X8) || (h == ss_v);
            mask[1][row_and_7][mask_id] |= m_col;
            mask_id = (tx == TX_8X8) || (w == ss_h);
            for (y = row_and_7; y < h + row_and_7; y++)
                mask[0][y][mask_id] |= t;
        } else {
            int t8 = t & wide_filter_col_mask[ss_h], t4 = t - t8;

            for (y = row_and_7; y < h + row_and_7; y++) {
                mask[0][y][2] |= t4;
                mask[0][y][1] |= t8;
            }
            mask[1][row_and_7][2 - !(row_and_7 & wide_filter_row_mask[ss_v])] |= m_col;
        }
    }
}

static void decode_b(AVCodecContext *ctx, int row, int col,
                     struct VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
                     enum BlockLevel bl, enum BlockPartition bp)
{
    VP9Context *s = ctx->priv_data;
    VP9Block *b = s->b;
    enum BlockSize bs = bl * 3 + bp;
    int bytesperpixel = s->bytesperpixel;
    int w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl;
    int emu[2];
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;

    s->row = row;
    s->row7 = row & 7;
    s->col = col;
    s->col7 = col & 7;

    s->min_mv.x = -(128 + col * 64);
    s->min_mv.y = -(128 + row * 64);
    s->max_mv.x = 128 + (s->cols - col - w4) * 64;
    s->max_mv.y = 128 + (s->rows - row - h4) * 64;
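    // the mv limits above are in 1/8-pel units: row/col are in 8px block
    // units (hence the * 64), and the 128 lets motion vectors reach up to
    // 16 pixels beyond the frame edge, which edge emulation handles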

    if (s->pass < 2) {
        b->bs = bs;
        b->bl = bl;
        b->bp = bp;
        decode_mode(ctx);
        b->uvtx = b->tx - ((s->ss_h && w4 * 2 == (1 << b->tx)) ||
                           (s->ss_v && h4 * 2 == (1 << b->tx)));

        if (!b->skip) {
            int has_coeffs;

            if (bytesperpixel == 1) {
                has_coeffs = decode_coeffs_8bpp(ctx);
            } else {
                has_coeffs = decode_coeffs_16bpp(ctx);
            }
            if (!has_coeffs && b->bs <= BS_8x8 && !b->intra) {
                b->skip = 1;
                memset(&s->above_skip_ctx[col], 1, w4);
                memset(&s->left_skip_ctx[s->row7], 1, h4);
            }
        } else {
            int row7 = s->row7;

#define SPLAT_ZERO_CTX(v, n) \
    switch (n) { \
    case 1:  v = 0;          break; \
    case 2:  AV_ZERO16(&v);  break; \
    case 4:  AV_ZERO32(&v);  break; \
    case 8:  AV_ZERO64(&v);  break; \
    case 16: AV_ZERO128(&v); break; \
    }
#define SPLAT_ZERO_YUV(dir, var, off, n, dir2) \
    do { \
        SPLAT_ZERO_CTX(s->dir##_y_##var[off * 2], n * 2); \
        if (s->ss_##dir2) { \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off], n); \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off], n); \
        } else { \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off * 2], n * 2); \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off * 2], n * 2); \
        } \
    } while (0)
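
            // zero the nnz contexts covered by the skipped block; the y
            // context spans twice as many 4x4 units as each uv context
            // when the chroma planes are subsampled, hence n * 2 vs. n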
            switch (w4) {
            case 1: SPLAT_ZERO_YUV(above, nnz_ctx, col, 1, h); break;
            case 2: SPLAT_ZERO_YUV(above, nnz_ctx, col, 2, h); break;
            case 4: SPLAT_ZERO_YUV(above, nnz_ctx, col, 4, h); break;
            case 8: SPLAT_ZERO_YUV(above, nnz_ctx, col, 8, h); break;
            }
            switch (h4) {
            case 1: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 1, v); break;
            case 2: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 2, v); break;
            case 4: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 4, v); break;
            case 8: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 8, v); break;
            }
        }

        if (s->pass == 1) {
            s->b++;
            s->block += w4 * h4 * 64 * bytesperpixel;
            s->uvblock[0] += w4 * h4 * 64 * bytesperpixel >> (s->ss_h + s->ss_v);
            s->uvblock[1] += w4 * h4 * 64 * bytesperpixel >> (s->ss_h + s->ss_v);
            s->eob += 4 * w4 * h4;
            s->uveob[0] += 4 * w4 * h4 >> (s->ss_h + s->ss_v);
            s->uveob[1] += 4 * w4 * h4 >> (s->ss_h + s->ss_v);
            return;
        }
    }

    // use emulated overhang buffers if the stride of the target buffer
    // can't hold the full block width; this makes it possible to support
    // emu-edge handling and so on even with large block overhangs
    emu[0] = (col + w4) * 8 * bytesperpixel > f->linesize[0] ||
             (row + h4) > s->rows;
    emu[1] = ((col + w4) * 8 >> s->ss_h) * bytesperpixel > f->linesize[1] ||
             (row + h4) > s->rows;
    if (emu[0]) {
        s->dst[0] = s->tmp_y;
        s->y_stride = 128;
    } else {
        s->dst[0] = f->data[0] + yoff;
        s->y_stride = f->linesize[0];
    }
    if (emu[1]) {
        s->dst[1] = s->tmp_uv[0];
        s->dst[2] = s->tmp_uv[1];
        s->uv_stride = 128;
    } else {
        s->dst[1] = f->data[1] + uvoff;
        s->dst[2] = f->data[2] + uvoff;
        s->uv_stride = f->linesize[1];
    }
    if (b->intra) {
        if (s->bpp > 8) {
            intra_recon_16bpp(ctx, yoff, uvoff);
        } else {
            intra_recon_8bpp(ctx, yoff, uvoff);
        }
    } else {
        if (s->bpp > 8) {
            inter_recon_16bpp(ctx);
        } else {
            inter_recon_8bpp(ctx);
        }
    }
    if (emu[0]) {
        int w = FFMIN(s->cols - col, w4) * 8, h = FFMIN(s->rows - row, h4) * 8, n, o = 0;

        for (n = 0; o < w; n++) {
            int bw = 64 >> n;

            av_assert2(n <= 4);
            if (w & bw) {
                s->dsp.mc[n][0][0][0][0](f->data[0] + yoff + o * bytesperpixel, f->linesize[0],
                                         s->tmp_y + o * bytesperpixel, 128, h, 0, 0);
                o += bw;
            }
        }
    }
    if (emu[1]) {
        int w = FFMIN(s->cols - col, w4) * 8 >> s->ss_h;
        int h = FFMIN(s->rows - row, h4) * 8 >> s->ss_v, n, o = 0;

        for (n = s->ss_h; o < w; n++) {
            int bw = 64 >> n;

            av_assert2(n <= 4);
            if (w & bw) {
                s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o * bytesperpixel, f->linesize[1],
                                         s->tmp_uv[0] + o * bytesperpixel, 128, h, 0, 0);
                s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o * bytesperpixel, f->linesize[2],
                                         s->tmp_uv[1] + o * bytesperpixel, 128, h, 0, 0);
                o += bw;
            }
        }
    }

    // pick filter level and find edges to apply filter to
    if (s->s.h.filter.level &&
        (lvl = s->s.h.segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1]
                                                        [b->mode[3] != ZEROMV]) > 0) {
        int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4);
        int skip_inter = !b->intra && b->skip, col7 = s->col7, row7 = s->row7;

        setctx_2d(&lflvl->level[row7 * 8 + col7], w4, h4, 8, lvl);
        mask_edges(lflvl->mask[0], 0, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
        if (s->ss_h || s->ss_v)
            mask_edges(lflvl->mask[1], s->ss_h, s->ss_v, row7, col7, x_end, y_end,
                       s->cols & 1 && col + w4 >= s->cols ? s->cols & 7 : 0,
                       s->rows & 1 && row + h4 >= s->rows ? s->rows & 7 : 0,
                       b->uvtx, skip_inter);
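
        // lazily fill the loopfilter limit LUTs for this level. Worked
        // example (illustrative): lvl = 36, sharpness = 4 gives
        // limit = 36 >> 1 = 18, capped at 9 - 4 = 5, and
        // mblim = 2 * (36 + 2) + 5 = 81.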
        if (!s->filter_lut.lim_lut[lvl]) {
            int sharp = s->s.h.filter.sharpness;
            int limit = lvl;

            if (sharp > 0) {
                limit >>= (sharp + 3) >> 2;
                limit = FFMIN(limit, 9 - sharp);
            }
            limit = FFMAX(limit, 1);

            s->filter_lut.lim_lut[lvl] = limit;
            s->filter_lut.mblim_lut[lvl] = 2 * (lvl + 2) + limit;
        }
    }

    if (s->pass == 2) {
        s->b++;
        s->block += w4 * h4 * 64 * bytesperpixel;
        s->uvblock[0] += w4 * h4 * 64 * bytesperpixel >> (s->ss_v + s->ss_h);
        s->uvblock[1] += w4 * h4 * 64 * bytesperpixel >> (s->ss_v + s->ss_h);
        s->eob += 4 * w4 * h4;
        s->uveob[0] += 4 * w4 * h4 >> (s->ss_v + s->ss_h);
        s->uveob[1] += 4 * w4 * h4 >> (s->ss_v + s->ss_h);
    }
}

static void decode_sb(AVCodecContext *ctx, int row, int col, struct VP9Filter *lflvl,
                      ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    VP9Context *s = ctx->priv_data;
    int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
            (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
    const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? vp9_default_kf_partition_probs[bl][c] :
                                                             s->prob.p.partition[bl][c];
    enum BlockPartition bp;
    ptrdiff_t hbs = 4 >> bl;
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
        decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
    } else if (col + hbs < s->cols) { // FIXME why not <=?
        if (row + hbs < s->rows) { // FIXME why not <=?
            bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
            switch (bp) {
            case PARTITION_NONE:
                decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_H:
                decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_b(ctx, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_V:
                decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff  += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_b(ctx, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_SPLIT:
                decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(ctx, row, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(ctx, row + hbs, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                break;
            default:
                av_assert0(0);
            }
        } else if (vp56_rac_get_prob_branchy(&s->c, p[1])) {
            bp = PARTITION_SPLIT;
            decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
            decode_sb(ctx, row, col + hbs, lflvl,
                      yoff + 8 * hbs * bytesperpixel,
                      uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
        } else {
            bp = PARTITION_H;
            decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else if (row + hbs < s->rows) { // FIXME why not <=?
        if (vp56_rac_get_prob_branchy(&s->c, p[2])) {
            bp = PARTITION_SPLIT;
            decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        } else {
            bp = PARTITION_V;
            decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else {
        bp = PARTITION_SPLIT;
        decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
    }
    s->counts.partition[bl][c][bp]++;
}
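
/*
 * At the right/bottom frame edge, partitions that would extend past the
 * visible area are not coded in full: only a reduced split/non-split
 * decision is read (p[1]/p[2] above), or the split is implied outright.
 *
 * decode_sb_mem() below is the pass-2 counterpart of decode_sb(): instead
 * of re-reading the partition tree from the bitstream, it replays the
 * block layout that was recorded in s->b_base during pass 1.
 */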
static void decode_sb_mem(AVCodecContext *ctx, int row, int col, struct VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    VP9Context *s = ctx->priv_data;
    VP9Block *b = s->b;
    ptrdiff_t hbs = 4 >> bl;
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        decode_b(ctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (s->b->bl == bl) {
        decode_b(ctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_b(ctx, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff  += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            decode_b(ctx, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        decode_sb_mem(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(ctx, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(ctx, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                yoff  += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(ctx, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(ctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}

static av_always_inline void filter_plane_cols(VP9Context *s, int col, int ss_h, int ss_v,
                                               uint8_t *lvl, uint8_t (*mask)[4],
                                               uint8_t *dst, ptrdiff_t ls)
{
    int y, x, bytesperpixel = s->bytesperpixel;

    // filter edges between columns (e.g. block1 | block2)
    for (y = 0; y < 8; y += 2 << ss_v, dst += 16 * ls, lvl += 16 << ss_v) {
        uint8_t *ptr = dst, *l = lvl, *hmask1 = mask[y], *hmask2 = mask[y + 1 + ss_v];
        unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2], hm13 = hmask1[3];
        unsigned hm2 = hmask2[1] | hmask2[2], hm23 = hmask2[3];
        unsigned hm = hm1 | hm2 | hm13 | hm23;

        for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 8 * bytesperpixel >> ss_h) {
            if (col || x > 1) {
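                // filter parameters: L is the edge's filter level, H the
                // high-edge-variance threshold (level >> 4), E the edge
                // limit (mblim LUT) and I the inner limit (lim LUT)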
                if (hm1 & x) {
                    int L = *l, H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    if (hmask1[0] & x) {
                        if (hmask2[0] & x) {
                            av_assert2(l[8 << ss_v] == L);
                            s->dsp.loop_filter_16[0](ptr, ls, E, I, H);
                        } else {
                            s->dsp.loop_filter_8[2][0](ptr, ls, E, I, H);
                        }
                    } else if (hm2 & x) {
                        L = l[8 << ss_v];
                        H |= (L >> 4) << 8;
                        E |= s->filter_lut.mblim_lut[L] << 8;
                        I |= s->filter_lut.lim_lut[L] << 8;
                        s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
                                               [!!(hmask2[1] & x)]
                                               [0](ptr, ls, E, I, H);
                    } else {
                        s->dsp.loop_filter_8[!!(hmask1[1] & x)]
                                            [0](ptr, ls, E, I, H);
                    }
                } else if (hm2 & x) {
                    int L = l[8 << ss_v], H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    s->dsp.loop_filter_8[!!(hmask2[1] & x)]
                                        [0](ptr + 8 * ls, ls, E, I, H);
                }
            }
            if (ss_h) {
                if (x & 0xAA)
                    l += 2;
            } else {
                if (hm13 & x) {
                    int L = *l, H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    if (hm23 & x) {
                        L = l[8 << ss_v];
                        H |= (L >> 4) << 8;
                        E |= s->filter_lut.mblim_lut[L] << 8;
                        I |= s->filter_lut.lim_lut[L] << 8;
                        s->dsp.loop_filter_mix2[0][0][0](ptr + 4 * bytesperpixel, ls, E, I, H);
                    } else {
                        s->dsp.loop_filter_8[0][0](ptr + 4 * bytesperpixel, ls, E, I, H);
                    }
                } else if (hm23 & x) {
                    int L = l[8 << ss_v], H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    s->dsp.loop_filter_8[0][0](ptr + 8 * ls + 4 * bytesperpixel, ls, E, I, H);
                }
                l++;
            }
        }
    }
}

static av_always_inline void filter_plane_rows(VP9Context *s, int row, int ss_h, int ss_v,
                                               uint8_t *lvl, uint8_t (*mask)[4],
                                               uint8_t *dst, ptrdiff_t ls)
{
    int y, x, bytesperpixel = s->bytesperpixel;

    //                                 block1
    // filter edges between rows (e.g. ------)
    //                                 block2
    for (y = 0; y < 8; y++, dst += 8 * ls >> ss_v) {
        uint8_t *ptr = dst, *l = lvl, *vmask = mask[y];
        unsigned vm = vmask[0] | vmask[1] | vmask[2], vm3 = vmask[3];

        for (x = 1; vm & ~(x - 1); x <<= (2 << ss_h), ptr += 16 * bytesperpixel, l += 2 << ss_h) {
            if (row || y) {
                if (vm & x) {
                    int L = *l, H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    if (vmask[0] & x) {
                        if (vmask[0] & (x << (1 + ss_h))) {
                            av_assert2(l[1 + ss_h] == L);
                            s->dsp.loop_filter_16[1](ptr, ls, E, I, H);
                        } else {
                            s->dsp.loop_filter_8[2][1](ptr, ls, E, I, H);
                        }
                    } else if (vm & (x << (1 + ss_h))) {
                        L = l[1 + ss_h];
                        H |= (L >> 4) << 8;
                        E |= s->filter_lut.mblim_lut[L] << 8;
                        I |= s->filter_lut.lim_lut[L] << 8;
                        s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
                                               [!!(vmask[1] & (x << (1 + ss_h)))]
                                               [1](ptr, ls, E, I, H);
                    } else {
                        s->dsp.loop_filter_8[!!(vmask[1] & x)]
                                            [1](ptr, ls, E, I, H);
                    }
                } else if (vm & (x << (1 + ss_h))) {
                    int L = l[1 + ss_h], H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    s->dsp.loop_filter_8[!!(vmask[1] & (x << (1 + ss_h)))]
                                        [1](ptr + 8 * bytesperpixel, ls, E, I, H);
                }
            }
            if (!ss_v) {
                if (vm3 & x) {
                    int L = *l, H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    if (vm3 & (x << (1 + ss_h))) {
                        L = l[1 + ss_h];
                        H |= (L >> 4) << 8;
                        E |= s->filter_lut.mblim_lut[L] << 8;
                        I |= s->filter_lut.lim_lut[L] << 8;
                        s->dsp.loop_filter_mix2[0][0][1](ptr + ls * 4, ls, E, I, H);
                    } else {
                        s->dsp.loop_filter_8[0][1](ptr + ls * 4, ls, E, I, H);
                    }
                } else if (vm3 & (x << (1 + ss_h))) {
                    int L = l[1 + ss_h], H = L >> 4;
                    int E = s->filter_lut.mblim_lut[L], I = s->filter_lut.lim_lut[L];

                    s->dsp.loop_filter_8[0][1](ptr + ls * 4 + 8 * bytesperpixel, ls, E, I, H);
                }
            }
        }
        if (ss_v) {
            if (y & 1)
                lvl += 16;
        } else {
            lvl += 8;
        }
    }
}

static void loopfilter_sb(AVCodecContext *ctx, struct VP9Filter *lflvl,
                          int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
{
    VP9Context *s = ctx->priv_data;
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    uint8_t *dst = f->data[0] + yoff;
    ptrdiff_t ls_y = f->linesize[0], ls_uv = f->linesize[1];
    uint8_t (*uv_masks)[8][4] = lflvl->mask[s->ss_h | s->ss_v];
    int p;

    // FIXME to what extent can we interleave the v/h loopfilter calls? E.g.
    // if you think of them as acting on a 8x8 block max, we can interleave
    // each v/h within the single x loop, but that only works if we work on
    // 8 pixel blocks, and we won't always do that (we want at least 16px
    // to use SSE2 optimizations, perhaps 32 for AVX2)
    filter_plane_cols(s, col, 0, 0, lflvl->level, lflvl->mask[0][0], dst, ls_y);
    filter_plane_rows(s, row, 0, 0, lflvl->level, lflvl->mask[0][1], dst, ls_y);

    for (p = 0; p < 2; p++) {
        dst = f->data[1 + p] + uvoff;
        filter_plane_cols(s, col, s->ss_h, s->ss_v, lflvl->level, uv_masks[0], dst, ls_uv);
        filter_plane_rows(s, row, s->ss_h, s->ss_v, lflvl->level, uv_masks[1], dst, ls_uv);
    }
}

static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_start = ( idx      * n) >> log2_n;
    int sb_end   = ((idx + 1) * n) >> log2_n;
    *start = FFMIN(sb_start, n) << 3;
    *end   = FFMIN(sb_end,   n) << 3;
}
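
/*
 * Illustrative example: log2_n = 1 (two tiles) and n = 11 superblocks
 * gives tile 0 the superblocks 0..4 (*start = 0, *end = 5 << 3 = 40) and
 * tile 1 the superblocks 5..10 (*start = 40, *end = 88); the << 3 converts
 * 64px-superblock indices to the 8px-block units used by the row/col loops.
 */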

static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
                                        int max_count, int update_factor)
{
    unsigned ct = ct0 + ct1, p2, p1;

    if (!ct)
        return;

    p1 = *p;
    p2 = ((ct0 << 8) + (ct >> 1)) / ct;
    p2 = av_clip(p2, 1, 255);
    ct = FFMIN(ct, max_count);
    update_factor = FASTDIV(update_factor * ct, max_count);

    // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
    *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
}
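
/*
 * Worked example (illustrative): p1 = 128, ct0 = 20, ct1 = 10 gives an
 * observed probability p2 = (20 * 256 + 15) / 30 = 171; with
 * max_count = 20 the count saturates, so update_factor = 128 is kept and
 * the new value is 128 + (((171 - 128) * 128 + 128) >> 8) = 150, i.e.
 * roughly halfway between the old and the observed probability.
 */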

static void adapt_probs(VP9Context *s)
{
    int i, j, k, l, m;
    prob_context *p = &s->prob_ctx[s->s.h.framectxid].p;
    int uf = (s->s.h.keyframe || s->s.h.intraonly || !s->last_keyframe) ? 112 : 128;

    // coefficients
    for (i = 0; i < 4; i++)
        for (j = 0; j < 2; j++)
            for (k = 0; k < 2; k++)
                for (l = 0; l < 6; l++)
                    for (m = 0; m < 6; m++) {
                        uint8_t *pp = s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m];
                        unsigned *e = s->counts.eob[i][j][k][l][m];
                        unsigned *c = s->counts.coef[i][j][k][l][m];

                        if (l == 0 && m >= 3) // dc only has 3 pt
                            break;

                        adapt_prob(&pp[0], e[0], e[1], 24, uf);
                        adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
                        adapt_prob(&pp[2], c[1], c[2], 24, uf);
                    }

    if (s->s.h.keyframe || s->s.h.intraonly) {
        memcpy(p->skip,  s->prob.p.skip,  sizeof(p->skip));
        memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
        memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
        memcpy(p->tx8p,  s->prob.p.tx8p,  sizeof(p->tx8p));
        return;
    }
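
    // everything below adapts inter-frame probabilities; those symbols are
    // never coded on key/intra-only frames, which is why the early return
    // above only carries over the skip/tx probabilities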

    // skip flag
    for (i = 0; i < 3; i++)
        adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);

    // intra/inter flag
    for (i = 0; i < 4; i++)
        adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);

    // comppred flag
    if (s->s.h.comppredmode == PRED_SWITCHABLE) {
        for (i = 0; i < 5; i++)
            adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
    }

    // reference frames
    if (s->s.h.comppredmode != PRED_SINGLEREF) {
        for (i = 0; i < 5; i++)
            adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
                       s->counts.comp_ref[i][1], 20, 128);
    }
    if (s->s.h.comppredmode != PRED_COMPREF) {
        for (i = 0; i < 5; i++) {
            uint8_t *pp = p->single_ref[i];
            unsigned (*c)[2] = s->counts.single_ref[i];

            adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
            adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
        }
    }

    // block partitioning
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++) {
            uint8_t *pp = p->partition[i][j];
            unsigned *c = s->counts.partition[i][j];

            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }

    // tx size
    if (s->s.h.txfmmode == TX_SWITCHABLE) {
        for (i = 0; i < 2; i++) {
            unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];

            adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
            adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
            adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
            adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
            adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
        }
    }

    // interpolation filter
    if (s->s.h.filtermode == FILTER_SWITCHABLE) {
        for (i = 0; i < 4; i++) {
            uint8_t *pp = p->filter[i];
            unsigned *c = s->counts.filter[i];

            adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
            adapt_prob(&pp[1], c[1], c[2], 20, 128);
        }
    }

    // inter modes
    for (i = 0; i < 7; i++) {
        uint8_t *pp = p->mv_mode[i];
        unsigned *c = s->counts.mv_mode[i];

        adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
        adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
        adapt_prob(&pp[2], c[1], c[3], 20, 128);
    }

    // mv joints
    {
        uint8_t *pp = p->mv_joint;
        unsigned *c = s->counts.mv_joint;

        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);
    }

    // mv components
    for (i = 0; i < 2; i++) {
        uint8_t *pp;
        unsigned *c, (*c2)[2], sum;

        adapt_prob(&p->mv_comp[i].sign, s->counts.mv_comp[i].sign[0],
                   s->counts.mv_comp[i].sign[1], 20, 128);

        pp = p->mv_comp[i].classes;
        c = s->counts.mv_comp[i].classes;
        sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
        adapt_prob(&pp[0], c[0], sum, 20, 128);
        sum -= c[1];
        adapt_prob(&pp[1], c[1], sum, 20, 128);
        sum -= c[2] + c[3];
        adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
        adapt_prob(&pp[3], c[2], c[3], 20, 128);
        sum -= c[4] + c[5];
        adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
        adapt_prob(&pp[5], c[4], c[5], 20, 128);
        sum -= c[6];
        adapt_prob(&pp[6], c[6], sum, 20, 128);
        adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
        adapt_prob(&pp[8], c[7], c[8], 20, 128);
        adapt_prob(&pp[9], c[9], c[10], 20, 128);

        adapt_prob(&p->mv_comp[i].class0, s->counts.mv_comp[i].class0[0],
                   s->counts.mv_comp[i].class0[1], 20, 128);
        pp = p->mv_comp[i].bits;
        c2 = s->counts.mv_comp[i].bits;
        for (j = 0; j < 10; j++)
            adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);

        for (j = 0; j < 2; j++) {
            pp = p->mv_comp[i].class0_fp[j];
            c = s->counts.mv_comp[i].class0_fp[j];
            adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
            adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
            adapt_prob(&pp[2], c[2], c[3], 20, 128);
        }
        pp = p->mv_comp[i].fp;
        c = s->counts.mv_comp[i].fp;
        adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
        adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
        adapt_prob(&pp[2], c[2], c[3], 20, 128);

        if (s->s.h.highprecisionmvs) {
            adapt_prob(&p->mv_comp[i].class0_hp, s->counts.mv_comp[i].class0_hp[0],
                       s->counts.mv_comp[i].class0_hp[1], 20, 128);
            adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0],
                       s->counts.mv_comp[i].hp[1], 20, 128);
        }
    }

    // y intra modes
    for (i = 0; i < 4; i++) {
        uint8_t *pp = p->y_mode[i];
        unsigned *c = s->counts.y_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }

    // uv intra modes
    for (i = 0; i < 10; i++) {
        uint8_t *pp = p->uv_mode[i];
        unsigned *c = s->counts.uv_mode[i], sum, s2;

        sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
        adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
        sum -= c[TM_VP8_PRED];
        adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
        sum -= c[VERT_PRED];
        adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
        s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
        sum -= s2;
        adapt_prob(&pp[3], s2, sum, 20, 128);
        s2 -= c[HOR_PRED];
        adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
        sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
        sum -= c[VERT_LEFT_PRED];
        adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
        adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
    }
}

static void free_buffers(VP9Context *s)
{
    av_freep(&s->intra_pred_data[0]);
    av_freep(&s->b_base);
    av_freep(&s->block_base);
}

static av_cold int vp9_decode_free(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_unref_frame(ctx, &s->s.frames[i]);
        av_frame_free(&s->s.frames[i].tf.f);
    }
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(ctx, &s->s.refs[i]);
        av_frame_free(&s->s.refs[i].f);
        if (s->next_refs[i].f->buf[0])
            ff_thread_release_buffer(ctx, &s->next_refs[i]);
        av_frame_free(&s->next_refs[i].f);
    }
    free_buffers(s);
    av_freep(&s->c_b);
    s->c_b_size = 0;

    return 0;
}

static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
                            int *got_frame, AVPacket *pkt)
{
    const uint8_t *data = pkt->data;
    int size = pkt->size;
    VP9Context *s = ctx->priv_data;
    int res, tile_row, tile_col, i, ref, row, col;
    int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
                            (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;
    AVFrame *f;
    int bytesperpixel;

    if ((res = decode_frame_header(ctx, data, size, &ref)) < 0) {
        return res;
    } else if (res == 0) {
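        // a zero return from the header parser means the packet carries no
        // coded frame data and instead re-displays an already-decoded
        // reference (VP9 "show existing frame")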
        if (!s->s.refs[ref].f->buf[0]) {
            av_log(ctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
            return AVERROR_INVALIDDATA;
        }
        if ((res = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
            return res;
        ((AVFrame *)frame)->pkt_pts = pkt->pts;
        ((AVFrame *)frame)->pkt_dts = pkt->dts;
        for (i = 0; i < 8; i++) {
            if (s->next_refs[i].f->buf[0])
                ff_thread_release_buffer(ctx, &s->next_refs[i]);
            if (s->s.refs[i].f->buf[0] &&
                (res = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
                return res;
        }
        *got_frame = 1;
        return pkt->size;
    }
    data += res;
    size -= res;

    if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
        if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
            vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP]);
        if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
            (res = vp9_ref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
            return res;
    }
    if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
        vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_MVPAIR]);
    if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
        (res = vp9_ref_frame(ctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
        return res;
    if (s->s.frames[CUR_FRAME].tf.f->buf[0])
        vp9_unref_frame(ctx, &s->s.frames[CUR_FRAME]);
    if ((res = vp9_alloc_frame(ctx, &s->s.frames[CUR_FRAME])) < 0)
        return res;
    f = s->s.frames[CUR_FRAME].tf.f;
    f->key_frame = s->s.h.keyframe;
    f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];

    if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
        (s->s.frames[REF_FRAME_MVPAIR].tf.f->width  != s->s.frames[CUR_FRAME].tf.f->width ||
         s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
        vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP]);
    }

    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->next_refs[i].f->buf[0])
            ff_thread_release_buffer(ctx, &s->next_refs[i]);
        if (s->s.h.refreshrefmask & (1 << i)) {
            res = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
        } else if (s->s.refs[i].f->buf[0]) {
            res = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
        }
        if (res < 0)
            return res;
    }

    if (ctx->hwaccel) {
        res = ctx->hwaccel->start_frame(ctx, NULL, 0);
        if (res < 0)
            return res;
        res = ctx->hwaccel->decode_slice(ctx, pkt->data, pkt->size);
        if (res < 0)
            return res;
        res = ctx->hwaccel->end_frame(ctx);
        if (res < 0)
            return res;
        goto finish;
    }

    // main tile decode loop
    bytesperpixel = s->bytesperpixel;
    memset(s->above_partition_ctx, 0, s->cols);
    memset(s->above_skip_ctx, 0, s->cols);
    if (s->s.h.keyframe || s->s.h.intraonly) {
        memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
    } else {
        memset(s->above_mode_ctx, NEARESTMV, s->cols);
    }
    memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
    memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_segpred_ctx, 0, s->cols);
    s->pass = s->s.frames[CUR_FRAME].uses_2pass =
        ctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
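    // uses_2pass (set above) enables two passes: pass 1 only parses the
    // entropy data, so that probability adaptation can complete and the
    // next frame thread can start early; pass 2 then does the pixel
    // reconstruction. Single-pass decoding keeps s->pass == 0.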

    if ((res = update_block_buffers(ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Failed to allocate block buffers\n");
        return res;
    }
    if (s->s.h.refreshctx && s->s.h.parallelmode) {
        int j, k, l, m;

        for (i = 0; i < 4; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++)
                            memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
                                   s->prob.coef[i][j][k][l][m], 3);
            if (s->s.h.txfmmode == i)
                break;
        }
        s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
        ff_thread_finish_setup(ctx);
    } else if (!s->s.h.refreshctx) {
        ff_thread_finish_setup(ctx);
    }

    do {
        yoff = uvoff = 0;
        s->b = s->b_base;
        s->block = s->block_base;
        s->uvblock[0] = s->uvblock_base[0];
        s->uvblock[1] = s->uvblock_base[1];
        s->eob = s->eob_base;
        s->uveob[0] = s->uveob_base[0];
        s->uveob[1] = s->uveob_base[1];

        for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
            set_tile_offset(&s->tile_row_start, &s->tile_row_end,
                            tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
            if (s->pass != 2) {
                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                    int64_t tile_size;

                    if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                        tile_row == s->s.h.tiling.tile_rows - 1) {
                        tile_size = size;
                    } else {
                        tile_size = AV_RB32(data);
                        data += 4;
                        size -= 4;
                    }
                    if (tile_size > size) {
                        ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                        return AVERROR_INVALIDDATA;
                    }
                    ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size);
                    if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) { // marker bit
                        ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                        return AVERROR_INVALIDDATA;
                    }
                    data += tile_size;
                    size -= tile_size;
                }
            }

            for (row = s->tile_row_start; row < s->tile_row_end;
                 row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
                struct VP9Filter *lflvl_ptr = s->lflvl;
                ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                    set_tile_offset(&s->tile_col_start, &s->tile_col_end,
                                    tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);

                    if (s->pass != 2) {
                        memset(s->left_partition_ctx, 0, 8);
                        memset(s->left_skip_ctx, 0, 8);
                        if (s->s.h.keyframe || s->s.h.intraonly) {
                            memset(s->left_mode_ctx, DC_PRED, 16);
                        } else {
                            memset(s->left_mode_ctx, NEARESTMV, 8);
                        }
                        memset(s->left_y_nnz_ctx, 0, 16);
                        memset(s->left_uv_nnz_ctx, 0, 32);
                        memset(s->left_segpred_ctx, 0, 8);

                        memcpy(&s->c, &s->c_b[tile_col], sizeof(s->c));
                    }

                    for (col = s->tile_col_start;
                         col < s->tile_col_end;
                         col += 8, yoff2 += 64 * bytesperpixel,
                         uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                        // FIXME integrate with lf code (i.e. zero after each
                        // use, similar to invtxfm coefficients, or similar)
                        if (s->pass != 1) {
                            memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                        }

                        if (s->pass == 2) {
                            decode_sb_mem(ctx, row, col, lflvl_ptr,
                                          yoff2, uvoff2, BL_64X64);
                        } else {
                            decode_sb(ctx, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                        }
                    }
                    if (s->pass != 2) {
                        memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
                    }
                }

                if (s->pass == 1) {
                    continue;
                }

                // backup pre-loopfilter reconstruction data for intra
                // prediction of next row of sb64s
                if (row + 8 < s->rows) {
                    memcpy(s->intra_pred_data[0],
                           f->data[0] + yoff + 63 * ls_y,
                           8 * s->cols * bytesperpixel);
                    memcpy(s->intra_pred_data[1],
                           f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                           8 * s->cols * bytesperpixel >> s->ss_h);
                    memcpy(s->intra_pred_data[2],
                           f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                           8 * s->cols * bytesperpixel >> s->ss_h);
                }

                // loopfilter one row
                if (s->s.h.filter.level) {
                    yoff2 = yoff;
                    uvoff2 = uvoff;
                    lflvl_ptr = s->lflvl;
                    for (col = 0; col < s->cols;
                         col += 8, yoff2 += 64 * bytesperpixel,
                         uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                        loopfilter_sb(ctx, lflvl_ptr, row, col, yoff2, uvoff2);
                    }
                }

                // FIXME maybe we can make this more finegrained by running the
                // loopfilter per-block instead of after each sbrow
                // In fact that would also make intra pred left preparation easier?
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
            }
        }

        if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
            adapt_probs(s);
            ff_thread_finish_setup(ctx);
        }
    } while (s->pass++ == 1);
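    // the loop above runs once for single-pass decoding (s->pass == 0)
    // and twice for two-pass decoding (s->pass == 1, then 2)
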
    ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);

finish:
    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(ctx, &s->s.refs[i]);
        if (s->next_refs[i].f->buf[0] &&
            (res = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
            return res;
    }

    if (!s->s.h.invisible) {
        if ((res = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
            return res;
        *got_frame = 1;
    }

    return pkt->size;
}

static void vp9_decode_flush(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    int i;

    for (i = 0; i < 3; i++)
        vp9_unref_frame(ctx, &s->s.frames[i]);
    for (i = 0; i < 8; i++)
        ff_thread_release_buffer(ctx, &s->s.refs[i]);
}

static int init_frames(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        s->s.frames[i].tf.f = av_frame_alloc();
        if (!s->s.frames[i].tf.f) {
            vp9_decode_free(ctx);
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
            return AVERROR(ENOMEM);
        }
    }
    for (i = 0; i < 8; i++) {
        s->s.refs[i].f = av_frame_alloc();
        s->next_refs[i].f = av_frame_alloc();
        if (!s->s.refs[i].f || !s->next_refs[i].f) {
            vp9_decode_free(ctx);
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}

static av_cold int vp9_decode_init(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;

    ctx->internal->allocate_progress = 1;
    s->last_bpp = 0;
    s->s.h.filter.sharpness = -1;

    return init_frames(ctx);
}

#if HAVE_THREADS
static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
{
    return init_frames(avctx);
}

static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, res;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    // detect size changes in other threads
    if (s->intra_pred_data[0] &&
        (!ssrc->intra_pred_data[0] || s->cols != ssrc->cols ||
         s->rows != ssrc->rows || s->bpp != ssrc->bpp || s->pix_fmt != ssrc->pix_fmt)) {
        free_buffers(s);
    }

    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_unref_frame(dst, &s->s.frames[i]);
        if (ssrc->s.frames[i].tf.f->buf[0]) {
            if ((res = vp9_ref_frame(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                return res;
        }
    }
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(dst, &s->s.refs[i]);
        if (ssrc->next_refs[i].f->buf[0]) {
            if ((res = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                return res;
        }
    }

    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->bpp = ssrc->bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
#endif

AVCodec ff_vp9_decoder = {
    .name                  = "vp9",
    .long_name             = NULL_IF_CONFIG_SMALL("Google VP9"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_VP9,
    .priv_data_size        = sizeof(VP9Context),
    .init                  = vp9_decode_init,
    .close                 = vp9_decode_free,
    .decode                = vp9_decode_frame,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .flush                 = vp9_decode_flush,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
    .profiles              = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
};